Explorar o código

libs3 (commit fd8b149044e42, 2017-06-01)

Source commit: 5fa1fd74970a0588128084926904549576e4fca5
Martin Prikryl hai 8 anos
pai
achega
cdcadf1088
Modificáronse 53 ficheiros con 17452 adicións e 0 borrados
  1. 3 0
      libs/libs3/.gitignore
  2. 165 0
      libs/libs3/COPYING
  3. 16 0
      libs/libs3/ChangeLog
  4. 416 0
      libs/libs3/GNUmakefile
  5. 294 0
      libs/libs3/GNUmakefile.mingw
  6. 309 0
      libs/libs3/GNUmakefile.osx
  7. 73 0
      libs/libs3/INSTALL
  8. 18 0
      libs/libs3/LICENSE
  9. 4 0
      libs/libs3/README
  10. 112 0
      libs/libs3/TODO
  11. 33 0
      libs/libs3/archlinux/PKGBUILD
  12. 5 0
      libs/libs3/debian/changelog
  13. 6 0
      libs/libs3/debian/changelog.Debian
  14. 12 0
      libs/libs3/debian/control
  15. 26 0
      libs/libs3/debian/control.dev
  16. 3 0
      libs/libs3/debian/postinst
  17. 886 0
      libs/libs3/doxyfile
  18. 82 0
      libs/libs3/inc/error_parser.h
  19. 2528 0
      libs/libs3/inc/libs3.h
  20. 45 0
      libs/libs3/inc/mingw/pthread.h
  21. 30 0
      libs/libs3/inc/mingw/sys/select.h
  22. 41 0
      libs/libs3/inc/mingw/sys/utsname.h
  23. 191 0
      libs/libs3/inc/request.h
  24. 43 0
      libs/libs3/inc/request_context.h
  25. 64 0
      libs/libs3/inc/response_headers_handler.h
  26. 76 0
      libs/libs3/inc/simplexml.h
  27. 107 0
      libs/libs3/inc/string_buffer.h
  28. 90 0
      libs/libs3/inc/util.h
  29. 81 0
      libs/libs3/libs3.spec
  30. 27 0
      libs/libs3/mswin/libs3.def
  31. 9 0
      libs/libs3/mswin/rmrf.bat
  32. 774 0
      libs/libs3/src/bucket.c
  33. 602 0
      libs/libs3/src/bucket_metadata.c
  34. 255 0
      libs/libs3/src/error_parser.c
  35. 490 0
      libs/libs3/src/general.c
  36. 119 0
      libs/libs3/src/mingw_functions.c
  37. 37 0
      libs/libs3/src/mingw_s3_functions.c
  38. 1093 0
      libs/libs3/src/multipart.c
  39. 397 0
      libs/libs3/src/object.c
  40. 1754 0
      libs/libs3/src/request.c
  41. 201 0
      libs/libs3/src/request_context.c
  42. 215 0
      libs/libs3/src/response_headers_handler.c
  43. 4040 0
      libs/libs3/src/s3.c
  44. 196 0
      libs/libs3/src/service.c
  45. 563 0
      libs/libs3/src/service_access_logging.c
  46. 207 0
      libs/libs3/src/simplexml.c
  47. 87 0
      libs/libs3/src/testsimplexml.c
  48. 175 0
      libs/libs3/src/util.c
  49. 105 0
      libs/libs3/test/badxml_01.xml
  50. 7 0
      libs/libs3/test/goodxml_01.xml
  51. 105 0
      libs/libs3/test/goodxml_02.xml
  52. 2 0
      libs/libs3/test/goodxml_03.xml
  53. 233 0
      libs/libs3/test/test.sh

+ 3 - 0
libs/libs3/.gitignore

@@ -0,0 +1,3 @@
+build
+build-debug
+

+ 165 - 0
libs/libs3/COPYING

@@ -0,0 +1,165 @@
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.

+ 16 - 0
libs/libs3/ChangeLog

@@ -0,0 +1,16 @@
+Thu Sep 18 10:03:02 NZST 2008   bryan@ischo.com
+	* This file is no longer maintained, sorry
+
+Sat Aug  9 13:44:21 NZST 2008   bryan@ischo.com
+	* Fixed bug wherein keys with non-URI-safe characters did not work
+	  correctly because they were not being URI-encoded in the request URI
+	* Split RPM and DEB packages into normal and devel packages
+
+Fri Aug  8 22:40:19 NZST 2008	bryan@ischo.com
+	* Branched 0.4
+	* Created RPM and Debian packaging
+
+Tue Aug  5 08:52:33 NZST 2008	bryan@ischo.com
+	* Bumped version number to 0.3
+	* Moved Makefile to GNUmakefile, added shared library build
+	* Added a bunch of GNU standard files (README, INSTALL, ChangeLog, etc)

+ 416 - 0
libs/libs3/GNUmakefile

@@ -0,0 +1,416 @@
+# GNUmakefile
+#
+# Copyright 2008 Bryan Ischo <bryan@ischo.com>
+#
+# This file is part of libs3.
+#
+# libs3 is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, version 3 of the License.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link the code of this library and its programs with the
+# OpenSSL library, and distribute linked combinations including the two.
+#
+# libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# version 3 along with libs3, in a file named COPYING.  If not, see
+# <http://www.gnu.org/licenses/>.
+
+# I tried to use the autoconf/automake/autolocal/etc (i.e. autohell) tools
+# but I just couldn't stomach them.  Since this is a Makefile for POSIX
+# systems, I will simply do away with autohell completely and use a GNU
+# Makefile.  GNU make ought to be available pretty much everywhere, so I
+# don't see this being a significant issue for portability.
+
+# All commands assume a GNU compiler.  For systems which do not use a GNU
+# compiler, write scripts with the same names as these commands, and taking
+# the same arguments, and translate the arguments and commands into the
+# appropriate non-POSIX ones as needed.  libs3 assumes a GNU toolchain as
+# the most portable way to build software possible.  Non-POSIX, non-GNU
+# systems can do the work of supporting this build infrastructure.
+
+
+# --------------------------------------------------------------------------
+# Set libs3 version number, unless it is already set.
+
+LIBS3_VER_MAJOR ?= 4
+LIBS3_VER_MINOR ?= 1
+LIBS3_VER := $(LIBS3_VER_MAJOR).$(LIBS3_VER_MINOR)
+
+
+# -----------------------------------------------------------------------------
+# Determine verbosity.  VERBOSE_SHOW should be prepended to every command which
+# should only be displayed if VERBOSE is set.  QUIET_ECHO may be used to
+# echo text only if VERBOSE is not set.  Typically, a VERBOSE_SHOW command will
+# be paired with a QUIET_ECHO command, to provide a command which is displayed
+# in VERBOSE mode, along with text which is displayed in non-VERBOSE mode to
+# describe the command.
+#
+# No matter what VERBOSE is defined to, it ends up as true if it's defined.
+# This will be weird if you defined VERBOSE=false in the environment, and we
+# switch it to true here; but the meaning of VERBOSE is, "if it's defined to
+# any value, then verbosity is turned on".  So don't define VERBOSE if you
+# don't want verbosity in the build process.
+# -----------------------------------------------------------------------------
+
+ifdef VERBOSE
+        VERBOSE = true
+        VERBOSE_ECHO = @ echo
+        VERBOSE_SHOW =
+        QUIET_ECHO = @ echo > /dev/null
+else
+        VERBOSE = false
+        VERBOSE_ECHO = @ echo > /dev/null
+        VERBOSE_SHOW = @
+        QUIET_ECHO = @ echo
+endif
+
+
+# --------------------------------------------------------------------------
+# BUILD directory
+ifndef BUILD
+    ifdef DEBUG
+        BUILD := build-debug
+    else
+        BUILD := build
+    endif
+endif
+
+
+# --------------------------------------------------------------------------
+# DESTDIR directory
+ifndef DESTDIR
+    DESTDIR := /usr
+endif
+
+# --------------------------------------------------------------------------
+# LIBDIR directory
+ifndef LIBDIR
+    LIBDIR := ${DESTDIR}/lib
+endif
+
+# --------------------------------------------------------------------------
+# Compiler CC handling
+ifndef CC
+    CC := gcc
+endif
+
+# --------------------------------------------------------------------------
+# Acquire configuration information for libraries that libs3 depends upon
+
+ifndef CURL_LIBS
+    CURL_LIBS := $(shell curl-config --libs)
+endif
+
+ifndef CURL_CFLAGS
+    CURL_CFLAGS := $(shell curl-config --cflags)
+endif
+
+ifndef LIBXML2_LIBS
+    LIBXML2_LIBS := $(shell xml2-config --libs)
+endif
+
+ifndef LIBXML2_CFLAGS
+    LIBXML2_CFLAGS := $(shell xml2-config --cflags)
+endif
+
+ifndef OPENSSL_LIBS
+    OPENSSL_LIBS := -lssl -lcrypto
+endif
+
+# --------------------------------------------------------------------------
+# These CFLAGS assume a GNU compiler.  For other compilers, write a script
+# which converts these arguments into their equivalent for that particular
+# compiler.
+
+ifndef CFLAGS
+    ifdef DEBUG
+        CFLAGS := -g
+    else
+        CFLAGS := -O3
+    endif
+endif
+
+CFLAGS += -Wall -Werror -Wshadow -Wextra \
+		  -Iinc \
+          $(CURL_CFLAGS) $(LIBXML2_CFLAGS) \
+          -DLIBS3_VER_MAJOR=\"$(LIBS3_VER_MAJOR)\" \
+          -DLIBS3_VER_MINOR=\"$(LIBS3_VER_MINOR)\" \
+          -DLIBS3_VER=\"$(LIBS3_VER)\" \
+          -D__STRICT_ANSI__ \
+          -D_ISOC99_SOURCE \
+          -D_POSIX_C_SOURCE=200112L
+
+LDFLAGS = $(CURL_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) -lpthread
+
+STRIP ?= strip
+INSTALL := install --strip-program=$(STRIP)
+
+
+# --------------------------------------------------------------------------
+# Default targets are everything
+
+.PHONY: all
+all: exported test
+
+
+# --------------------------------------------------------------------------
+# Exported targets are the library and driver program
+
+.PHONY: exported
+exported: libs3 s3 headers
+
+
+# --------------------------------------------------------------------------
+# Install target
+
+.PHONY: install
+install: exported
+	$(QUIET_ECHO) $(DESTDIR)/bin/s3: Installing executable
+	$(VERBOSE_SHOW) $(INSTALL) -Dps -m u+rwx,go+rx $(BUILD)/bin/s3 \
+                    $(DESTDIR)/bin/s3
+	$(QUIET_ECHO) \
+        $(LIBDIR)/libs3.so.$(LIBS3_VER): Installing shared library
+	$(VERBOSE_SHOW) $(INSTALL) -Dps -m u+rw,go+r \
+               $(BUILD)/lib/libs3.so.$(LIBS3_VER_MAJOR) \
+               $(LIBDIR)/libs3.so.$(LIBS3_VER)
+	$(QUIET_ECHO) \
+        $(LIBDIR)/libs3.so.$(LIBS3_VER_MAJOR): Linking shared library
+	$(VERBOSE_SHOW) ln -sf libs3.so.$(LIBS3_VER) \
+               $(LIBDIR)/libs3.so.$(LIBS3_VER_MAJOR)
+	$(QUIET_ECHO) $(LIBDIR)/libs3.so: Linking shared library
+	$(VERBOSE_SHOW) ln -sf libs3.so.$(LIBS3_VER_MAJOR) $(LIBDIR)/libs3.so
+	$(QUIET_ECHO) $(LIBDIR)/libs3.a: Installing static library
+	$(VERBOSE_SHOW) $(INSTALL) -Dp -m u+rw,go+r $(BUILD)/lib/libs3.a \
+                    $(LIBDIR)/libs3.a
+	$(QUIET_ECHO) $(DESTDIR)/include/libs3.h: Installing header
+	$(VERBOSE_SHOW) $(INSTALL) -Dp -m u+rw,go+r $(BUILD)/include/libs3.h \
+                    $(DESTDIR)/include/libs3.h
+
+
+# --------------------------------------------------------------------------
+# Uninstall target
+
+.PHONY: uninstall
+uninstall:
+	$(QUIET_ECHO) Installed files: Uninstalling
+	$(VERBOSE_SHOW) \
+	    rm -f $(DESTDIR)/bin/s3 \
+              $(DESTDIR)/include/libs3.h \
+              $(DESTDIR)/lib/libs3.a \
+              $(DESTDIR)/lib/libs3.so \
+              $(DESTDIR)/lib/libs3.so.$(LIBS3_VER_MAJOR) \
+              $(DESTDIR)/lib/libs3.so.$(LIBS3_VER)
+
+
+# --------------------------------------------------------------------------
+# Compile target patterns
+
+$(BUILD)/obj/%.o: src/%.c
+	$(QUIET_ECHO) $@: Compiling object
+	@ mkdir -p $(dir $(BUILD)/dep/$<)
+	@ $(CC) $(CFLAGS) -M -MG -MQ $@ -DCOMPILINGDEPENDENCIES \
+        -o $(BUILD)/dep/$(<:%.c=%.d) -c $<
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) $(CC) $(CFLAGS) -o $@ -c $<
+
+$(BUILD)/obj/%.do: src/%.c
+	$(QUIET_ECHO) $@: Compiling dynamic object
+	@ mkdir -p $(dir $(BUILD)/dep/$<)
+	@ $(CC) $(CFLAGS) -M -MG -MQ $@ -DCOMPILINGDEPENDENCIES \
+        -o $(BUILD)/dep/$(<:%.c=%.dd) -c $<
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) $(CC) $(CFLAGS) -fpic -fPIC -o $@ -c $<
+
+
+# --------------------------------------------------------------------------
+# libs3 library targets
+
+LIBS3_SHARED = $(BUILD)/lib/libs3.so.$(LIBS3_VER_MAJOR)
+LIBS3_STATIC = $(BUILD)/lib/libs3.a
+
+.PHONY: libs3
+libs3: $(LIBS3_SHARED) $(LIBS3_STATIC)
+
+LIBS3_SOURCES := bucket.c bucket_metadata.c error_parser.c general.c \
+                 object.c request.c request_context.c \
+                 response_headers_handler.c service_access_logging.c \
+                 service.c simplexml.c util.c multipart.c
+
+$(LIBS3_SHARED): $(LIBS3_SOURCES:%.c=$(BUILD)/obj/%.do)
+	$(QUIET_ECHO) $@: Building shared library
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) $(CC) -shared -Wl,-soname,libs3.so.$(LIBS3_VER_MAJOR) \
+        -o $@ $^ $(LDFLAGS)
+
+$(LIBS3_STATIC): $(LIBS3_SOURCES:%.c=$(BUILD)/obj/%.o)
+	$(QUIET_ECHO) $@: Building static library
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) $(AR) cr $@ $^
+
+
+# --------------------------------------------------------------------------
+# Driver program targets
+
+.PHONY: s3
+s3: $(BUILD)/bin/s3
+
+$(BUILD)/bin/s3: $(BUILD)/obj/s3.o $(LIBS3_SHARED)
+	$(QUIET_ECHO) $@: Building executable
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) $(CC) -o $@ $^ $(LDFLAGS)
+
+
+# --------------------------------------------------------------------------
+# libs3 header targets
+
+.PHONY: headers
+headers: $(BUILD)/include/libs3.h
+
+$(BUILD)/include/libs3.h: inc/libs3.h
+	$(QUIET_ECHO) $@: Linking header
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) ln -sf $(abspath $<) $@
+
+
+# --------------------------------------------------------------------------
+# Test targets
+
+.PHONY: test
+test: $(BUILD)/bin/testsimplexml
+
+$(BUILD)/bin/testsimplexml: $(BUILD)/obj/testsimplexml.o $(LIBS3_STATIC)
+	$(QUIET_ECHO) $@: Building executable
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) $(CC) -o $@ $^ $(LIBXML2_LIBS)
+
+
+# --------------------------------------------------------------------------
+# Clean target
+
+.PHONY: clean
+clean:
+	$(QUIET_ECHO) $(BUILD): Cleaning
+	$(VERBOSE_SHOW) rm -rf $(BUILD)
+
+.PHONY: distclean
+distclean:
+	$(QUIET_ECHO) $(BUILD): Cleaning
+	$(VERBOSE_SHOW) rm -rf $(BUILD)
+
+
+# --------------------------------------------------------------------------
+# Clean dependencies target
+
+.PHONY: cleandeps
+cleandeps:
+	$(QUIET_ECHO) $(BUILD)/dep: Cleaning dependencies
+	$(VERBOSE_SHOW) rm -rf $(BUILD)/dep
+
+
+# --------------------------------------------------------------------------
+# Dependencies
+
+ALL_SOURCES := $(LIBS3_SOURCES) s3.c testsimplexml.c
+
+$(foreach i, $(ALL_SOURCES), $(eval -include $(BUILD)/dep/src/$(i:%.c=%.d)))
+$(foreach i, $(ALL_SOURCES), $(eval -include $(BUILD)/dep/src/$(i:%.c=%.dd)))
+
+
+# --------------------------------------------------------------------------
+# Debian package target
+
+DEBPKG = $(BUILD)/pkg/libs3_$(LIBS3_VER).deb
+DEBDEVPKG = $(BUILD)/pkg/libs3-dev_$(LIBS3_VER).deb
+
+.PHONY: deb
+deb: $(DEBPKG) $(DEBDEVPKG)
+
+$(DEBPKG): DEBARCH = $(shell dpkg-architecture | grep ^DEB_BUILD_ARCH= | \
+                       cut -d '=' -f 2)
+$(DEBPKG): exported $(BUILD)/deb/DEBIAN/control $(BUILD)/deb/DEBIAN/shlibs \
+           $(BUILD)/deb/DEBIAN/postinst \
+           $(BUILD)/deb/usr/share/doc/libs3/changelog.gz \
+           $(BUILD)/deb/usr/share/doc/libs3/changelog.Debian.gz \
+           $(BUILD)/deb/usr/share/doc/libs3/copyright
+	DESTDIR=$(BUILD)/deb/usr $(MAKE) install
+	rm -rf $(BUILD)/deb/usr/include
+	rm -f $(BUILD)/deb/usr/lib/libs3.a
+	@mkdir -p $(dir $@)
+	fakeroot dpkg-deb -b $(BUILD)/deb $@
+	mv $@ $(BUILD)/pkg/libs3_$(LIBS3_VER)_$(DEBARCH).deb
+
+$(DEBDEVPKG): DEBARCH = $(shell dpkg-architecture | grep ^DEB_BUILD_ARCH= | \
+                          cut -d '=' -f 2)
+$(DEBDEVPKG): exported $(BUILD)/deb-dev/DEBIAN/control \
+           $(BUILD)/deb-dev/usr/share/doc/libs3-dev/changelog.gz \
+           $(BUILD)/deb-dev/usr/share/doc/libs3-dev/changelog.Debian.gz \
+           $(BUILD)/deb-dev/usr/share/doc/libs3-dev/copyright
+	DESTDIR=$(BUILD)/deb-dev/usr $(MAKE) install
+	rm -rf $(BUILD)/deb-dev/usr/bin
+	rm -f $(BUILD)/deb-dev/usr/lib/libs3.so*
+	@mkdir -p $(dir $@)
+	fakeroot dpkg-deb -b $(BUILD)/deb-dev $@
+	mv $@ $(BUILD)/pkg/libs3-dev_$(LIBS3_VER)_$(DEBARCH).deb
+
+$(BUILD)/deb/DEBIAN/control: debian/control
+	@mkdir -p $(dir $@)
+	echo -n "Depends: " > $@
+	dpkg-shlibdeps -Sbuild -O $(BUILD)/lib/libs3.so.$(LIBS3_VER_MAJOR) | \
+            cut -d '=' -f 2- >> $@
+	sed -e 's/LIBS3_VERSION/$(LIBS3_VER)/' \
+            < $< | sed -e 's/DEBIAN_ARCHITECTURE/$(DEBARCH)/' | \
+            grep -v ^Source: >> $@
+
+$(BUILD)/deb-dev/DEBIAN/control: debian/control.dev
+	@mkdir -p $(dir $@)
+	sed -e 's/LIBS3_VERSION/$(LIBS3_VER)/' \
+            < $< | sed -e 's/DEBIAN_ARCHITECTURE/$(DEBARCH)/' > $@
+
+$(BUILD)/deb/DEBIAN/shlibs:
+	echo -n "libs3 $(LIBS3_VER_MAJOR) libs3 " > $@
+	echo "(>= $(LIBS3_VER))" >> $@
+
+$(BUILD)/deb/DEBIAN/postinst: debian/postinst
+	@mkdir -p $(dir $@)
+	cp $< $@
+
+$(BUILD)/deb/usr/share/doc/libs3/copyright: LICENSE
+	@mkdir -p $(dir $@)
+	cp $< $@
+	@echo >> $@
+	@echo -n "An alternate location for the GNU General Public " >> $@
+	@echo "License version 3 on Debian" >> $@
+	@echo "systems is /usr/share/common-licenses/GPL-3." >> $@
+
+$(BUILD)/deb-dev/usr/share/doc/libs3-dev/copyright: LICENSE
+	@mkdir -p $(dir $@)
+	cp $< $@
+	@echo >> $@
+	@echo -n "An alternate location for the GNU General Public " >> $@
+	@echo "License version 3 on Debian" >> $@
+	@echo "systems is /usr/share/common-licenses/GPL-3." >> $@
+
+$(BUILD)/deb/usr/share/doc/libs3/changelog.gz: debian/changelog
+	@mkdir -p $(dir $@)
+	gzip --best -c $< > $@
+
+$(BUILD)/deb-dev/usr/share/doc/libs3-dev/changelog.gz: debian/changelog
+	@mkdir -p $(dir $@)
+	gzip --best -c $< > $@
+
+$(BUILD)/deb/usr/share/doc/libs3/changelog.Debian.gz: debian/changelog.Debian
+	@mkdir -p $(dir $@)
+	gzip --best -c $< > $@
+
+$(BUILD)/deb-dev/usr/share/doc/libs3-dev/changelog.Debian.gz: \
+    debian/changelog.Debian
+	@mkdir -p $(dir $@)
+	gzip --best -c $< > $@
+
+

+ 294 - 0
libs/libs3/GNUmakefile.mingw

@@ -0,0 +1,294 @@
+# GNUmakefile.mingw
+#
+# Copyright 2008 Bryan Ischo <bryan@ischo.com>
+#
+# This file is part of libs3.
+#
+# libs3 is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, version 3 of the License.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link the code of this library and its programs with the
+# OpenSSL library, and distribute linked combinations including the two.
+#
+# libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# version 3 along with libs3, in a file named COPYING.  If not, see
+# <http://www.gnu.org/licenses/>.
+
+# I tried to use the autoconf/automake/autolocal/etc (i.e. autohell) tools
+# but I just couldn't stomach them.  Since this is a Makefile for POSIX
+# systems, I will simply do away with autohell completely and use a GNU
+# Makefile.  GNU make ought to be available pretty much everywhere, so I
+# don't see this being a significant issue for portability.
+
+# All commands assume a GNU compiler.  For systems which do not use a GNU
+# compiler, write scripts with the same names as these commands, and taking
+# the same arguments, and translate the arguments and commands into the
+# appropriate non-POSIX ones as needed.  libs3 assumes a GNU toolchain as
+# the most portable way to build software possible.  Non-POSIX, non-GNU
+# systems can do the work of supporting this build infrastructure.
+
+
+# --------------------------------------------------------------------------
+# Set libs3 version number, unless it is already set.
+
+LIBS3_VER_MAJOR ?= 4
+LIBS3_VER_MINOR ?= 1
+LIBS3_VER := $(LIBS3_VER_MAJOR).$(LIBS3_VER_MINOR)
+
+
+# -----------------------------------------------------------------------------
+# Determine verbosity.  VERBOSE_SHOW should be prepended to every command which
+# should only be displayed if VERBOSE is set.  QUIET_ECHO may be used to
+# echo text only if VERBOSE is not set.  Typically, a VERBOSE_SHOW command will
+# be paired with a QUIET_ECHO command, to provide a command which is displayed
+# in VERBOSE mode, along with text which is displayed in non-VERBOSE mode to
+# describe the command.
+#
+# No matter what VERBOSE is defined to, it ends up as true if it's defined.
+# This will be weird if you defined VERBOSE=false in the environment, and we
+# switch it to true here; but the meaning of VERBOSE is, "if it's defined to
+# any value, then verbosity is turned on".  So don't define VERBOSE if you
+# don't want verbosity in the build process.
+# -----------------------------------------------------------------------------
+
+ifdef VERBOSE
+        VERBOSE = true
+        VERBOSE_ECHO = @ echo
+        VERBOSE_SHOW =
+        QUIET_ECHO = @ echo >nul
+else
+        VERBOSE = false
+        VERBOSE_ECHO = @ echo >nul
+        VERBOSE_SHOW = @
+        QUIET_ECHO = @ echo
+endif
+
+
+# --------------------------------------------------------------------------
+# BUILD directory
+ifndef BUILD
+    ifdef DEBUG
+        BUILD := build-debug
+    else
+        BUILD := build
+    endif
+endif
+
+
+# --------------------------------------------------------------------------
+# DESTDIR directory
+ifndef DESTDIR
+    DESTDIR := libs3-$(LIBS3_VER)
+endif
+
+
+# --------------------------------------------------------------------------
+# Acquire configuration information for libraries that libs3 depends upon
+
+ifndef CURL_LIBS
+    CURL_LIBS := -Lc:\libs3-libs\bin -lcurl
+endif
+
+ifndef CURL_CFLAGS
+    CURL_CFLAGS := -Ic:\libs3-libs\include
+endif
+
+ifndef LIBXML2_LIBS
+    LIBXML2_LIBS := -Lc:\libs3-libs\bin -lxml2
+endif
+
+ifndef LIBXML2_CFLAGS
+    LIBXML2_CFLAGS := -Ic:\libs3-libs\include
+endif
+
+
+# --------------------------------------------------------------------------
+# These CFLAGS assume a GNU compiler.  For other compilers, write a script
+# which converts these arguments into their equivalent for that particular
+# compiler.
+
+ifndef CFLAGS
+    ifdef DEBUG
+        CFLAGS := -g
+    else
+        CFLAGS := -O3
+    endif
+endif
+
+CFLAGS += -Wall -Werror -Wshadow -Wextra -Iinc \
+          $(CURL_CFLAGS) $(LIBXML2_CFLAGS) \
+          -DLIBS3_VER_MAJOR=\"$(LIBS3_VER_MAJOR)\" \
+          -DLIBS3_VER_MINOR=\"$(LIBS3_VER_MINOR)\" \
+          -DLIBS3_VER=\"$(LIBS3_VER)\" \
+          -D__STRICT_ANSI__ \
+          -D_ISOC99_SOURCE \
+          -D_POSIX_C_SOURCE=200112L \
+          -Dsleep=Sleep -DSLEEP_UNITS_PER_SECOND=1000 \
+          -DFOPEN_EXTRA_FLAGS=\"b\" \
+          -Iinc/mingw -include windows.h
+
+LDFLAGS = $(CURL_LIBS) $(LIBXML2_LIBS)
+
+# --------------------------------------------------------------------------
+# Default targets are everything
+
+.PHONY: all
+all: exported test
+
+
+# --------------------------------------------------------------------------
+# Exported targets are the library and driver program
+
+.PHONY: exported
+exported: libs3 s3 headers
+
+
+# --------------------------------------------------------------------------
+# Install target
+
+.PHONY: install
+install: exported
+	$(QUIET_ECHO) $(DESTDIR)/bin/s3.exe: Installing executable
+	- @ mkdir $(DESTDIR)\bin 2>&1 | echo >nul
+	$(VERBOSE_SHOW) copy $(BUILD)\bin\s3.exe $(DESTDIR)\bin\s3.exe >nul
+	$(QUIET_ECHO) $(DESTDIR)/bin/libs3/dll: Installing dynamic library
+	$(VERBOSE_SHOW) copy $(BUILD)\bin\libs3.dll $(DESTDIR)\bin\libs3.dll >nul
+	$(QUIET_ECHO) $(DESTDIR)/lib/libs3.a: Installing static library
+	- @ mkdir $(DESTDIR)\lib 2>&1 | echo >nul
+	$(VERBOSE_SHOW) copy $(BUILD)\lib\libs3.a $(DESTDIR)\lib\libs3.a >nul
+	$(QUIET_ECHO) $(DESTDIR)/lib/libs3.def: Installing def file
+	$(VERBOSE_SHOW) copy mswin\libs3.def $(DESTDIR)\lib\libs3.def >nul
+	- @ mkdir $(DESTDIR)\include 2>&1 | echo >nul
+	$(QUIET_ECHO) $(DESTDIR)/include/libs3.h: Copying header
+	$(VERBOSE_SHOW) copy $(BUILD)\include\libs3.h \
+                    $(DESTDIR)\include\libs3.h >nul
+	$(QUIET_ECHO) $(DESTDIR)/LICENSE: Copying license
+	$(VERBOSE_SHOW) copy LICENSE $(DESTDIR)\LICENSE >nul
+	$(QUIET_ECHO) $(DESTDIR)/COPYING: Copying license
+	$(VERBOSE_SHOW) copy COPYING $(DESTDIR)\COPYING >nul
+
+
+# --------------------------------------------------------------------------
+# Uninstall target
+
+.PHONY: uninstall
+uninstall:
+	$(QUIET_ECHO) Installed files: Uninstalling
+	$(VERBOSE_SHOW) \
+	    del $(DESTDIR)\bin\s3.exe \
+            $(DESTDIR)\bin\libs3.dll \
+            $(DESTDIR)\lib\libs3.a \
+            $(DESTDIR)\lib\libs3.def \
+            $(DESTDIR)\include\libs3.h \
+            $(DESTDIR)\LICENSE \
+            $(DESTDIR)\COPYING
+
+
+# --------------------------------------------------------------------------
+# Compile target patterns
+
+$(BUILD)/obj/%.o: src/%.c
+	$(QUIET_ECHO) $@: Compiling object
+	- @ mkdir $(subst /,\,$(dir $(BUILD)/dep/$<)) 2>&1 | echo >nul
+	@ gcc $(CFLAGS) -M -MG -MQ $@ -DCOMPILINGDEPENDENCIES \
+        -o $(BUILD)/dep/$(<:%.c=%.d) -c $<
+	- @ mkdir $(subst /,\,$(dir $@)) 2>&1 | echo >nul
+	$(VERBOSE_SHOW) gcc $(CFLAGS) -o $@ -c $<
+
+
+# --------------------------------------------------------------------------
+# libs3 library targets
+
+LIBS3_SHARED = $(BUILD)/bin/libs3.dll
+LIBS3_STATIC = $(BUILD)/lib/libs3.a
+
+.PHONY: libs3
+libs3: $(LIBS3_SHARED) $(BUILD)/lib/libs3.a
+
+LIBS3_SOURCES := src/bucket.c src/bucket_metadata.c src/error_parser.c src/general.c \
+                 src/object.c src/request.c src/request_context.c \
+                 src/response_headers_handler.c src/service_access_logging.c \
+                 src/service.c src/simplexml.c src/util.c src/multipart.c \
+                 src/mingw_functions.c
+
+$(LIBS3_SHARED): $(LIBS3_SOURCES:src/%.c=$(BUILD)/obj/%.o)
+	$(QUIET_ECHO) $@: Building dynamic library
+	- @ mkdir $(subst /,\,$(dir $@)) 2>&1 | echo >nul
+	$(VERBOSE_SHOW) gcc -shared -o $@ $^ $(LDFLAGS) -lws2_32
+
+$(LIBS3_STATIC): $(LIBS3_SHARED)
+	$(QUIET_ECHO) $@: Building static library
+	- @ mkdir $(subst /,\,$(dir $@)) 2>&1 | echo >nul
+	$(VERBOSE_SHOW) dlltool --def mswin\libs3.def --dllname $(subst /,\,$<) \
+            --output-lib $(subst /,\,$@)
+
+
+# --------------------------------------------------------------------------
+# Driver program targets
+
+.PHONY: s3
+s3: $(BUILD)/bin/s3.exe
+
+$(BUILD)/bin/s3.exe: $(BUILD)/obj/s3.o $(BUILD)/obj/mingw_s3_functions.o \
+                     $(BUILD)/lib/libs3.a
+	$(QUIET_ECHO) $@: Building executable
+	- @ mkdir $(subst /,\,$(dir $@)) 2>&1 | echo >nul
+	$(VERBOSE_SHOW) gcc -o $@ $^ $(LDFLAGS) -lws2_32
+
+
+# --------------------------------------------------------------------------
+# libs3 header targets
+
+.PHONY: headers
+headers: $(BUILD)\include\libs3.h
+
+$(BUILD)\include\libs3.h: inc\libs3.h
+	$(QUIET_ECHO) $@: Copying header
+	- @ mkdir $(subst /,\,$(dir $@)) 2>&1 | echo >nul
+	$(VERBOSE_SHOW) copy $< $@
+
+
+# --------------------------------------------------------------------------
+# Test targets
+
+.PHONY: test
+test: $(BUILD)/bin/testsimplexml
+
+$(BUILD)/bin/testsimplexml: $(BUILD)/obj/testsimplexml.o \
+                            $(BUILD)/obj/simplexml.o
+	$(QUIET_ECHO) $@: Building executable
+	- @ mkdir $(subst /,\,$(dir $@)) 2>&1 | echo >nul
+	$(VERBOSE_SHOW) gcc -o $@ $^ $(LIBXML2_LIBS)
+
+
+# --------------------------------------------------------------------------
+# Clean target
+
+.PHONY: clean
+clean:
+	$(QUIET_ECHO) $(BUILD): Cleaning
+	$(VERBOSE_SHOW) mswin\rmrf.bat $(BUILD)
+
+
+# --------------------------------------------------------------------------
+# Clean dependencies target
+
+.PHONY: cleandeps
+cleandeps:
+	$(QUIET_ECHO) $(BUILD)/dep: Cleaning dependencies
+	$(VERBOSE_SHOW) mswin\rmrf.bat $(BUILD)\dep
+
+
+# --------------------------------------------------------------------------
+# Dependencies
+
+ALL_SOURCES := $(LIBS3_SOURCES) s3.c testsimplexml.c
+
+$(foreach i, $(ALL_SOURCES), $(eval -include $(BUILD)/dep/src/$(i:%.c=%.d)))

+ 309 - 0
libs/libs3/GNUmakefile.osx

@@ -0,0 +1,309 @@
+# GNUmakefile.osx
+#
+# Copyright 2008 Bryan Ischo <[email protected]>
+#
+# This file is part of libs3.
+#
+# libs3 is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, version 3 of the License.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link the code of this library and its programs with the
+# OpenSSL library, and distribute linked combinations including the two.
+#
+# libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# version 3 along with libs3, in a file named COPYING.  If not, see
+# <http://www.gnu.org/licenses/>.
+
+# I tried to use the autoconf/automake/aclocal/etc (i.e. autohell) tools
+# but I just couldn't stomach them.  Since this is a Makefile for POSIX
+# systems, I will simply do away with autohell completely and use a GNU
+# Makefile.  GNU make ought to be available pretty much everywhere, so I
+# don't see this being a significant issue for portability.
+
+# All commands assume a GNU compiler.  For systems which do not use a GNU
+# compiler, write scripts with the same names as these commands, and taking
+# the same arguments, and translate the arguments and commands into the
+# appropriate non-POSIX ones as needed.  libs3 assumes a GNU toolchain as
+# the most portable way to build software possible.  Non-POSIX, non-GNU
+# systems can do the work of supporting this build infrastructure.
+
+
+# --------------------------------------------------------------------------
+# Set libs3 version number, unless it is already set.
+
+LIBS3_VER_MAJOR ?= 4
+LIBS3_VER_MINOR ?= 1
+LIBS3_VER := $(LIBS3_VER_MAJOR).$(LIBS3_VER_MINOR)
+
+
+# -----------------------------------------------------------------------------
+# Determine verbosity.  VERBOSE_SHOW should be prepended to every command which
+# should only be displayed if VERBOSE is set.  QUIET_ECHO may be used to
+# echo text only if VERBOSE is not set.  Typically, a VERBOSE_SHOW command will
+# be paired with a QUIET_ECHO command, to provide a command which is displayed
+# in VERBOSE mode, along with text which is displayed in non-VERBOSE mode to
+# describe the command.
+#
+# No matter what VERBOSE is defined to, it ends up as true if it's defined.
+# This will be weird if you defined VERBOSE=false in the environment, and we
+# switch it to true here; but the meaning of VERBOSE is, "if it's defined to
+# any value, then verbosity is turned on".  So don't define VERBOSE if you
+# don't want verbosity in the build process.
+# -----------------------------------------------------------------------------
+
+ifdef VERBOSE
+        VERBOSE = true
+        VERBOSE_ECHO = @ echo
+        VERBOSE_SHOW =
+        QUIET_ECHO = @ echo > /dev/null
+else
+        VERBOSE = false
+        VERBOSE_ECHO = @ echo > /dev/null
+        VERBOSE_SHOW = @
+        QUIET_ECHO = @ echo
+endif
+
+
+# --------------------------------------------------------------------------
+# BUILD directory
+ifndef BUILD
+    ifdef DEBUG
+        BUILD := build-debug
+    else
+        BUILD := build
+    endif
+endif
+
+
+# --------------------------------------------------------------------------
+# DESTDIR directory
+ifndef DESTDIR
+    DESTDIR := /usr
+endif
+
+
+# --------------------------------------------------------------------------
+# Acquire configuration information for libraries that libs3 depends upon
+
+ifndef CURL_LIBS
+    CURL_LIBS := $(shell curl-config --libs)
+endif
+
+ifndef CURL_CFLAGS
+    CURL_CFLAGS := $(shell curl-config --cflags)
+endif
+
+ifndef LIBXML2_LIBS
+    LIBXML2_LIBS := $(shell xml2-config --libs)
+    # Work around missing libsystem_symptoms.dylib in Xcode 8; see
+    # http://stackoverflow.com/questions/39536144/libsystem-symptoms-dylib-missing-in-xcode-8
+    LIBXML2_LIBS := $(filter-out -L$(shell xcrun --show-sdk-path)/usr/lib, $(LIBXML2_LIBS))
+endif
+
+ifndef LIBXML2_CFLAGS
+    LIBXML2_CFLAGS := $(shell xml2-config --cflags)
+endif
+
+
+# --------------------------------------------------------------------------
+# These CFLAGS assume a GNU compiler.  For other compilers, write a script
+# which converts these arguments into their equivalent for that particular
+# compiler.
+
+ifndef CFLAGS
+    ifdef DEBUG
+        CFLAGS := -g
+    else
+        CFLAGS := -O3
+    endif
+endif
+
+# --------------------------------------------------------------------------
+# -Werror breaks the build on macOS Sierra, rendering the build unusable,
+# so we use the -Wunused-parameter flag instead, which will only issue a
+# warning with the newest clang compiler
+
+CFLAGS += -Wall -Wunused-parameter -Wshadow -Wextra -Iinc \
+          $(CURL_CFLAGS) $(LIBXML2_CFLAGS) \
+          -DLIBS3_VER_MAJOR=\"$(LIBS3_VER_MAJOR)\" \
+          -DLIBS3_VER_MINOR=\"$(LIBS3_VER_MINOR)\" \
+          -DLIBS3_VER=\"$(LIBS3_VER)\" \
+          -D__STRICT_ANSI__ \
+          -D_ISOC99_SOURCE \
+          -fno-common
+
+LDFLAGS = $(CURL_LIBS) $(LIBXML2_LIBS) -lpthread
+
+
+# --------------------------------------------------------------------------
+# Default targets are everything
+
+.PHONY: all
+all: exported test
+
+
+# --------------------------------------------------------------------------
+# Exported targets are the library and driver program
+
+.PHONY: exported
+exported: libs3 s3 headers
+
+
+# --------------------------------------------------------------------------
+# Install target
+
+.PHONY: install
+install: exported
+	$(QUIET_ECHO) $(DESTDIR)/bin/s3: Installing executable
+	$(VERBOSE_SHOW) install -ps -m u+rwx,go+rx $(BUILD)/bin/s3 \
+                    $(DESTDIR)/bin/s3
+	$(QUIET_ECHO) \
+        $(DESTDIR)/lib/libs3.$(LIBS3_VER).dylib: Installing dynamic library
+	$(VERBOSE_SHOW) install -p -m u+rw,go+r \
+                    $(BUILD)/lib/libs3.$(LIBS3_VER_MAJOR).dylib \
+                    $(DESTDIR)/lib/libs3.$(LIBS3_VER).dylib
+	$(QUIET_ECHO) \
+        $(DESTDIR)/lib/libs3.$(LIBS3_VER_MAJOR).dylib: Linking dynamic library
+	$(VERBOSE_SHOW) ln -sf libs3.$(LIBS3_VER).dylib \
+                    $(DESTDIR)/lib/libs3.$(LIBS3_VER_MAJOR).dylib
+	$(QUIET_ECHO) $(DESTDIR)/lib/libs3.dylib: Linking dynamic library
+	$(VERBOSE_SHOW) ln -sf libs3.$(LIBS3_VER_MAJOR).dylib \
+                    $(DESTDIR)/lib/libs3.dylib
+	$(QUIET_ECHO) $(DESTDIR)/lib/libs3.a: Installing static library
+	$(VERBOSE_SHOW) install -p -m u+rw,go+r $(BUILD)/lib/libs3.a \
+                    $(DESTDIR)/lib/libs3.a
+	$(QUIET_ECHO) $(DESTDIR)/include/libs3.h: Installing header
+	$(VERBOSE_SHOW) install -p -m u+rw,go+r $(BUILD)/include/libs3.h \
+                    $(DESTDIR)/include/libs3.h
+
+
+# --------------------------------------------------------------------------
+# Uninstall target
+
+.PHONY: uninstall
+uninstall:
+	$(QUIET_ECHO) Installed files: Uninstalling
+	$(VERBOSE_SHOW) \
+        rm -f $(DESTDIR)/bin/s3 \
+              $(DESTDIR)/lib/libs3.dylib \
+              $(DESTDIR)/lib/libs3.$(LIBS3_VER_MAJOR).dylib \
+              $(DESTDIR)/lib/libs3.$(LIBS3_VER).dylib \
+              $(DESTDIR)/lib/libs3.a \
+              $(DESTDIR)/include/libs3.h
+
+
+# --------------------------------------------------------------------------
+# Compile target patterns
+
+$(BUILD)/obj/%.o: src/%.c
+	$(QUIET_ECHO) $@: Compiling object
+	@ mkdir -p $(dir $(BUILD)/dep/$<)
+	@ gcc $(CFLAGS) -M -MG -MQ $@ -DCOMPILINGDEPENDENCIES \
+        -o $(BUILD)/dep/$(<:%.c=%.d) -c $<
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) gcc $(CFLAGS) -o $@ -c $<
+
+$(BUILD)/obj/%.do: src/%.c
+	$(QUIET_ECHO) $@: Compiling dynamic object
+	@ mkdir -p $(dir $(BUILD)/dep/$<)
+	@ gcc $(CFLAGS) -M -MG -MQ $@ -DCOMPILINGDEPENDENCIES \
+        -o $(BUILD)/dep/$(<:%.c=%.dd) -c $<
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) gcc $(CFLAGS) -fpic -fPIC -o $@ -c $<
+
+
+# --------------------------------------------------------------------------
+# libs3 library targets
+
+LIBS3_SHARED = $(BUILD)/lib/libs3.$(LIBS3_VER_MAJOR).dylib
+LIBS3_STATIC = $(BUILD)/lib/libs3.a
+
+.PHONY: libs3
+libs3: $(LIBS3_SHARED) $(LIBS3_SHARED_MAJOR) $(BUILD)/lib/libs3.a
+
+LIBS3_SOURCES := src/bucket.c src/bucket_metadata.c src/error_parser.c src/general.c \
+                 src/object.c src/request.c src/request_context.c \
+                 src/response_headers_handler.c src/service_access_logging.c \
+                 src/service.c src/simplexml.c src/util.c src/multipart.c
+
+$(LIBS3_SHARED): $(LIBS3_SOURCES:src/%.c=$(BUILD)/obj/%.do)
+	$(QUIET_ECHO) $@: Building shared library
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) gcc -dynamiclib -install_name \
+        libs3.$(LIBS3_VER_MAJOR).dylib \
+        -compatibility_version $(LIBS3_VER_MAJOR) \
+        -current_version $(LIBS3_VER) -o $@ $^ $(LDFLAGS)
+
+$(LIBS3_STATIC): $(LIBS3_SOURCES:src/%.c=$(BUILD)/obj/%.o)
+	$(QUIET_ECHO) $@: Building static library
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) $(AR) cr $@ $^
+
+
+# --------------------------------------------------------------------------
+# Driver program targets
+
+.PHONY: s3
+s3: $(BUILD)/bin/s3
+
+$(BUILD)/bin/s3: $(BUILD)/obj/s3.o $(LIBS3_SHARED)
+	$(QUIET_ECHO) $@: Building executable
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) gcc -o $@ $^ $(LDFLAGS)
+
+
+# --------------------------------------------------------------------------
+# libs3 header targets
+
+.PHONY: headers
+headers: $(BUILD)/include/libs3.h
+
+$(BUILD)/include/libs3.h: inc/libs3.h
+	$(QUIET_ECHO) $@: Linking header
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) ln -sf $(abspath $<) $@
+
+
+# --------------------------------------------------------------------------
+# Test targets
+
+.PHONY: test
+test: $(BUILD)/bin/testsimplexml
+
+$(BUILD)/bin/testsimplexml: $(BUILD)/obj/testsimplexml.o $(LIBS3_STATIC)
+	$(QUIET_ECHO) $@: Building executable
+	@ mkdir -p $(dir $@)
+	$(VERBOSE_SHOW) gcc -o $@ $^ $(LIBXML2_LIBS)
+
+# --------------------------------------------------------------------------
+# Clean target
+
+.PHONY: clean
+clean:
+	$(QUIET_ECHO) $(BUILD): Cleaning
+	$(VERBOSE_SHOW) rm -rf $(BUILD)
+
+
+# --------------------------------------------------------------------------
+# Clean dependencies target
+
+.PHONY: cleandeps
+cleandeps:
+	$(QUIET_ECHO) $(BUILD)/dep: Cleaning dependencies
+	$(VERBOSE_SHOW) rm -rf $(BUILD)/dep
+
+
+# --------------------------------------------------------------------------
+# Dependencies
+
+ALL_SOURCES := $(LIBS3_SOURCES) s3.c testsimplexml.c
+
+$(foreach i, $(ALL_SOURCES), $(eval -include $(BUILD)/dep/src/$(i:%.c=%.d)))
+$(foreach i, $(ALL_SOURCES), $(eval -include $(BUILD)/dep/src/$(i:%.c=%.dd)))

+ 73 - 0
libs/libs3/INSTALL

@@ -0,0 +1,73 @@
+
+To install libs3 on a POSIX system (except Microsoft Windows):
+--------------------------------------------------------------
+
+Note that all POSIX builds have prerequisites, such as development libraries
+that libs3 requires and that must be installed at the time that libs3 is
+built.  The easiest way to find out what those are, is to run the build
+command and then observe the results.
+
+*** For RPM-based systems (Fedora Core, Mandrake, etc) ***
+
+* rpmbuild -ta <libs3 archive>
+
+for example:
+
+rpmbuild -ta libs3-0.3.tar.gz
+
+
+*** For dpkg-based systems (Debian, Ubuntu, etc) ***
+
+* make deb
+
+This will produce a Debian package in the build/pkg directory.
+
+
+*** For all other systems ***
+
+* make [DESTDIR=destination root] install
+
+DESTDIR defaults to /usr
+
+
+To install libs3 on a Microsoft Windows system:
+-----------------------------------------------
+
+*** Using MingW ***
+
+* libs3 can be built on Windows using the MingW compiler.  No other tool
+  is needed.  However, the following libraries are needed to build libs3:
+
+  - curl development libraries
+  - libxml2 development libraries, and the libraries that it requires:
+    - iconv
+    - zlib
+
+  These projects are independent of libs3, and their release schedule and
+  means of distribution would make it very difficult to provide links to
+  the files to download and keep them up-to-date in this file, so no attempt
+  is made here.
+
+  Development libraries and other files can be placed in:
+  c:\libs3-libs\bin
+  c:\libs3-libs\include
+
+  If the above locations are used, then the GNUmakefile.mingw will work with
+  no special caveats.  If the above locations are not used, then the following
+  environment variables should be set:
+  CURL_LIBS should be set to the MingW compiler flags needed to locate and
+      link in the curl libraries
+  CURL_CFLAGS should be set to the MingW compiler flags needed to locate and
+      include the curl headers
+  LIBXML2_LIBS should be set to the MingW compiler flags needed to locate and
+      link in the libxml2 libraries
+  LIBXML2_CFLAGS should be set to the MingW compiler flags needed to locate and
+      include the libxml2 headers
+
+* mingw32-make [DESTDIR=destination] -f GNUmakefile.mingw install
+
+DESTDIR defaults to libs3-<version>
+
+* DESTDIR can be zipped up into a .zip file for distribution.  For best
+  results, the dependent libraries (curl, openssl, etc) should be included,
+  along with their licenses.

+ 18 - 0
libs/libs3/LICENSE

@@ -0,0 +1,18 @@
+Copyright 2008 Bryan Ischo <[email protected]>
+
+libs3 is free software: you can redistribute it and/or modify it under the
+terms of the GNU Lesser General Public License as published by the Free
+Software Foundation, version 3 of the License.
+
+In addition, as a special exception, the copyright holders give
+permission to link the code of this library and its programs with the
+OpenSSL library, and distribute linked combinations including the two.
+
+libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License
+version 3 along with libs3, in a file named COPYING.  If not, see
+<http://www.gnu.org/licenses/>.

+ 4 - 0
libs/libs3/README

@@ -0,0 +1,4 @@
+This directory contains the libs3 library.
+
+The libs3 library is free software.  See the file LICENSE for copying
+permission.

+ 112 - 0
libs/libs3/TODO

@@ -0,0 +1,112 @@
+* Implement functions for generating form stuff for posting to s3
+
+* Write s3 man page
+
+=== Object Versioning ===
+
+- Enabling versioning is just a PUT request to a specific object name with a canned XML content (3 hours)
+
+- Suspending versioning is another similar PUT request (1 hour)
+
+- Get versioning state is a simple request, requires some simple XML parsing to represent the result (2 hours)
+
+- Support reporting the version ID in PUT, POST, or COPY request results (1 hour)
+
+- List all object versions uses a GET request for a specific sub-resource (2 hours)
+
+- Include support for "truncated" object versions list (1 hour)
+
+- Support versionId in GET and HEAD requests (2 hours)
+
+
+=== Reduced Redundancy Storage ===
+
+- Support adding a "storage class" header to define storage class in PUT request (2 hours)
+
+- Documentation note that S3StatusErrorMethodNotAllowed is returned when there has been a loss of a reduced redundancy object (1 hour)
+
+- Consider adding an API for changing the storage class of an object (even though this is already covered by doing a PUT with the appropriate user-controlled parameters) (1 hour)
+
+
+=== Bucket Policies ===
+
+- Support parsing/deparsing of bucket policies (JSON parsing!) into structured representation - 26 hours
+
+- Set/delete/query bucket policies - 12 hours
+
+
+=== Notifications ===
+
+- Structurally represent and provide an API for setting/deleting/querying: 16 hours
+
+
+=== IAM ===
+
+This is Amazon's "user" management API; not part of S3 although IAM users can be referenced in S3 Bucket Policies.  Not intending to support IAM account management since the creation and management of users is not S3 functionality.
+
+
+=== Response Header API Support ===
+
+- Allows HTTP response headers to be specified on a per-object basis
+
+- Full support: 5 hours
+
+
+=== Support for Hosting static websites in Amazon S3 ===
+
+- Structured representation of website routing rules: 8 hours
+
+- Support the "website" bucket resource and structured interactions with it: 16 hours
+
+
+=== Multipart Upload Copy ===
+
+- 10 hours
+
+
+=== Temporary Security Credentials ===
+
+- ???
+
+
+=== Server-Side Encryption ===
+
+- Just have to set an additional header in some of the requests
+
+- 4 hours
+
+
+=== Multi-Object Delete ===
+
+- POST /?delete -- 4 hours
+
+
+=== MFA Authentication ===
+
+(part of Bucket Policy)
+
+
+=== Cost Allocation Tagging ===
+
+- Looks like just a simple free-form string value that can be associated with buckets
+
+- May need to implement special logging for querying by cost allocation tag
+
+- Need to find the REST API documentation for this
+
+- Likely to take ~8 hours to support
+
+
+=== Cross-Origin Resource Sharing ===
+
+- Support /?cors resource: 16 hours
+
+
+=== Support for Archiving Data to Amazon Glacier ===
+
+- Already included in Object Lifecycle Management
+
+
+=== Root domain support for website hosting ===
+
+- Already included in static website support stuff

+ 33 - 0
libs/libs3/archlinux/PKGBUILD

@@ -0,0 +1,33 @@
+# Contributor: Bryan Ischo <[email protected]>
+pkgname=libs3-git
+_gitname=libs3
+pkgver=master
+pkgrel=1
+pkgdesc="C Library and Tools for Amazon S3 Access"
+arch=('i686' 'x86_64')
+url="https://github.com/bji/libs3"
+license=('GPL')
+groups=()
+depends=('libxml2' 'openssl' 'curl')
+makedepends=('make' 'libxml2' 'openssl' 'curl')
+provides=()
+conflicts=()
+replaces=()
+backup=()
+options=()
+install=
+source=("git://github.com/bji/${_gitname}.git")
+noextract=()
+md5sums=('SKIP')
+
+
+pkgver() {
+  cd "$_gitname"
+  echo $(git rev-list --count HEAD).$(git rev-parse --short HEAD)
+}
+
+package() {
+  cd "$_gitname"
+  DESTDIR=$pkgdir/usr make install || return 1
+}
+# vim:set ts=2 sw=2 et:

+ 5 - 0
libs/libs3/debian/changelog

@@ -0,0 +1,5 @@
+libs3 (all) unstable; urgency=low
+
+  * This file is not maintained.  See project source code for changes.
+
+ -- Bryan Ischo <[email protected]>  Wed, 06 Aug 2008 09:36:43 -0400

+ 6 - 0
libs/libs3/debian/changelog.Debian

@@ -0,0 +1,6 @@
+libs3 (all) unstable; urgency=low
+
+      * libs3 Debian maintainer and upstream author are identical.
+        Therefore see normal changelog file for Debian changes.
+
+ -- Bryan Ischo <[email protected]>  Wed, 06 Aug 2008 09:36:43 -0400

+ 12 - 0
libs/libs3/debian/control

@@ -0,0 +1,12 @@
+Package: libs3
+Source: THIS LINE WILL BE REMOVED, dpkg-shlibdepends NEEDS IT
+Version: LIBS3_VERSION
+Architecture: DEBIAN_ARCHITECTURE
+Section: net
+Priority: extra
+Maintainer: Bryan Ischo <[email protected]>
+Homepage: https://github.com/bji/libs3
+Description: C Library and Tools for Amazon S3 Access
+ This package includes the libs3 shared object library, needed to run
+ applications compiled against libs3, and additionally contains the s3
+ utility for accessing Amazon S3.

+ 26 - 0
libs/libs3/debian/control.dev

@@ -0,0 +1,26 @@
+Package: libs3-dev
+Version: LIBS3_VERSION
+Architecture: DEBIAN_ARCHITECTURE
+Section: libdevel
+Priority: extra
+Depends: libs3 (>= LIBS3_VERSION)
+Maintainer: Bryan Ischo <[email protected]>
+Homepage: https://github.com/bji/libs3
+Description: C Development Library for Amazon S3 Access
+ This library provides an API for using Amazon's S3 service (see
+ http://s3.amazonaws.com).  Its design goals are:
+ .
+  - To provide a simple and straightforward API for accessing all of S3's
+    functionality
+  - To not require the developer using libs3 to need to know anything about:
+      - HTTP
+      - XML
+      - SSL
+    In other words, this API is meant to stand on its own, without requiring
+    any implicit knowledge of how S3 services are accessed using HTTP
+    protocols.
+  - To be usable from multithreaded code
+  - To be usable by code which wants to process multiple S3 requests
+    simultaneously from a single thread
+  - To be usable in the simple, straightforward way using sequentialized
+    blocking requests

+ 3 - 0
libs/libs3/debian/postinst

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+ldconfig

+ 886 - 0
libs/libs3/doxyfile

@@ -0,0 +1,886 @@
+# Doxyfile 1.2.14
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# General configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded 
+# by quotes) that should identify the project.
+
+PROJECT_NAME           = libs3
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
+# This could be handy for archiving the generated documentation or 
+# if some version control system is used.
+
+PROJECT_NUMBER         = trunk
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) 
+# base path where the generated documentation will be put. 
+# If a relative path is entered, it will be relative to the location 
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = dox
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all 
+# documentation generated by doxygen is written. Doxygen will use this 
+# information to generate all constant output in the proper language. 
+# The default language is English, other supported languages are: 
+# Brazilian, Chinese, Croatian, Czech, Danish, Dutch, Finnish, French, 
+# German, Greek, Hungarian, Italian, Japanese, Korean, Norwegian, Polish, 
+# Portuguese, Romanian, Russian, Slovak, Slovene, Spanish and Swedish.
+
+OUTPUT_LANGUAGE        = English
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in 
+# documentation are documented, even if no documentation was available. 
+# Private class members and static file members will be hidden unless 
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL            = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class 
+# will be included in the documentation.
+
+EXTRACT_PRIVATE        = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file 
+# will be included in the documentation.
+
+EXTRACT_STATIC         = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) 
+# defined locally in source files will be included in the documentation. 
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all 
+# undocumented members of documented classes, files or namespaces. 
+# If set to NO (the default) these members will be included in the 
+# various overviews, but no documentation section is generated. 
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all 
+# undocumented classes that are normally visible in the class hierarchy. 
+# If set to NO (the default) these class will be included in the various 
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will 
+# include brief member descriptions after the members that are listed in 
+# the file and class documentation (similar to JavaDoc). 
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend 
+# the brief description of a member or function before the detailed description. 
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the 
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF           = YES
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then 
+# Doxygen will generate a detailed section even if there is only a brief 
+# description.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all inherited 
+# members of a class in the documentation of that class as if those members were 
+# ordinary class members. Constructors, destructors and assignment operators of 
+# the base classes will not be shown.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full 
+# path before files name in the file list and in the header files. If set 
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES        = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag 
+# can be used to strip a user defined part of the path. Stripping is 
+# only done if one of the specified strings matches the left-hand part of 
+# the path. It is allowed to use relative paths in the argument list.
+
+STRIP_FROM_PATH        = 
+
+# The INTERNAL_DOCS tag determines if documentation 
+# that is typed after a \internal command is included. If the tag is set 
+# to NO (the default) then the documentation will be excluded. 
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS          = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct 
+# doxygen to hide any special comment blocks from generated source code 
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate 
+# file names in lower case letters. If set to YES upper case letters are also 
+# allowed. This is useful if you have classes or files whose names only differ 
+# in case and if your file system supports case sensitive file names. Windows 
+# users are advised to set this option to NO.
+
+CASE_SENSE_NAMES       = YES
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter 
+# (but less readable) file names. This can be useful if your file system 
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES            = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen 
+# will show members with their full class and namespace scopes in the 
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen 
+# will generate a verbatim copy of the header file for each class for 
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS       = YES
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen 
+# will put list of the files that are included by a file in the documentation 
+# of that file.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen 
+# will interpret the first line (until the first dot) of a JavaDoc-style 
+# comment as the brief description. If set to NO, the JavaDoc 
+# comments will behave just like the Qt-style comments (thus requiring an 
+# explicit @brief command for a brief description).
+
+JAVADOC_AUTOBRIEF      = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented 
+# member inherits the documentation from any documented member that it 
+# reimplements.
+
+INHERIT_DOCS           = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] 
+# is inserted in the documentation for inline members.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen 
+# will sort the (detailed) documentation of file and class members 
+# alphabetically by member name. If set to NO the members will appear in 
+# declaration order.
+
+SORT_MEMBER_DOCS       = NO
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC 
+# tag is set to YES, then doxygen will reuse the documentation of the first 
+# member in the group (if any) for the other members of the group. By default 
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. 
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE               = 8
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or 
+# disable (NO) the todo list. This list is created by putting \todo 
+# commands in the documentation.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or 
+# disable (NO) the test list. This list is created by putting \test 
+# commands in the documentation.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or 
+# disable (NO) the bug list. This list is created by putting \bug 
+# commands in the documentation.
+
+GENERATE_BUGLIST       = YES
+
+# This tag can be used to specify a number of aliases that acts 
+# as commands in the documentation. An alias has the form "name=value". 
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to 
+# put the command \sideeffect (or @sideeffect) in the documentation, which 
+# will result in a user defined paragraph with heading "Side Effects:". 
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES                = 
+
+# The ENABLED_SECTIONS tag can be used to enable conditional 
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS       = 
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines 
+# the initial value of a variable or define consist of for it to appear in 
+# the documentation. If the initializer consists of more lines than specified 
+# here it will be hidden. Use a value of 0 to hide initializers completely. 
+# The appearance of the initializer of individual variables and defines in the 
+# documentation can be controlled using \showinitializer or \hideinitializer 
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources 
+# only. Doxygen will then generate output that is more tailored for C. 
+# For instance some of the names that are used will be different. The list 
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C  = YES
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated 
+# at the bottom of the documentation of classes and structs. If set to YES the 
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES        = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated 
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are 
+# generated by doxygen. Possible values are YES and NO. If left blank 
+# NO is used.
+
+WARNINGS               = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings 
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will 
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# The WARN_FORMAT tag determines the format of the warning messages that 
+# doxygen can produce. The string should contain the $file, $line, and $text 
+# tags, which will be replaced by the file and line number from which the 
+# warning originated and the warning text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning 
+# and error messages should be written. If left blank the output is written 
+# to stderr.
+
+WARN_LOGFILE           = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain 
+# documented source files. You may enter file names like "myfile.cpp" or 
+# directories like "/usr/src/myproject". Separate the files or directories 
+# with spaces.
+
+INPUT                  = inc/libs3.h
+
+# If the value of the INPUT tag contains directories, you can use the 
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
+# and *.h) to filter out the source-files in the directories. If left 
+# blank the following patterns are tested: 
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp 
+# *.h++ *.idl *.odl
+
+FILE_PATTERNS          =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories 
+# should be searched for input files as well. Possible values are YES and NO. 
+# If left blank NO is used.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should 
+# excluded from the INPUT source files. This way you can easily exclude a 
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE                =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or directories 
+# that are symbolic links (a Unix filesystem feature) are excluded from the input.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the 
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude 
+# certain files from those directories.
+
+EXCLUDE_PATTERNS       = 
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or 
+# directories that contain example code fragments that are included (see 
+# the \include command).
+
+EXAMPLE_PATH           = 
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the 
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
+# and *.h) to filter out the source-files in the directories. If left 
+# blank all files are included.
+
+EXAMPLE_PATTERNS       = 
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be 
+# searched for input files to be used with the \include or \dontinclude 
+# commands irrespective of the value of the RECURSIVE tag. 
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or 
+# directories that contain image that are included in the documentation (see 
+# the \image command).
+
+IMAGE_PATH             = 
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should 
+# invoke to filter for each input file. Doxygen will invoke the filter program 
+# by executing (via popen()) the command <filter> <input-file>, where <filter> 
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an 
+# input file. Doxygen will then use the output that the filter program writes 
+# to standard output.
+
+INPUT_FILTER           = 
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using 
+# INPUT_FILTER) will be used to filter the input files when producing source 
+# files to browse.
+
+FILTER_SOURCE_FILES    = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will 
+# be generated. Documented entities will be cross-referenced with these sources.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body 
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES         = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default) 
+# then for each documented function all documented 
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default) 
+# then for each documented function all documented entities 
+# called/used by that function will be listed.
+
+REFERENCES_RELATION    = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index 
+# of all compounds will be generated. Enable this if the project 
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX     = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then 
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns 
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all 
+# classes will be put under the same header in the alphabetical index. 
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that 
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX          = S3_
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will 
+# generate HTML output.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for 
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank 
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for 
+# each generated HTML page. If it is left blank doxygen will generate a 
+# standard header.
+
+HTML_HEADER            = 
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for 
+# each generated HTML page. If it is left blank doxygen will generate a 
+# standard footer.
+
+HTML_FOOTER            = 
+
+# The HTML_STYLESHEET tag can be used to specify a user defined cascading 
+# style sheet that is used by each HTML page. It can be used to 
+# fine-tune the look of the HTML output. If the tag is left blank doxygen 
+# will generate a default style sheet
+
+HTML_STYLESHEET        = 
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, 
+# files or namespaces will be aligned in HTML using tables. If set to 
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS     = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files 
+# will be generated that can be used as input for tools like the 
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) 
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP      = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag 
+# controls if a separate .chi index file is generated (YES) or that 
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI           = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag 
+# controls whether a binary table of contents is generated (YES) or a 
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members 
+# to the contents of the Html help documentation and to the tree view.
+
+TOC_EXPAND             = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at 
+# top of each HTML page. The value NO (the default) enables the index and 
+# the value YES disables it.
+
+DISABLE_INDEX          = NO
+
+# This tag can be used to set the number of enum values (range [1..20]) 
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that 
+# is generated for HTML Help). For this to work a browser that supports 
+# JavaScript and frames is required (for instance Mozilla, Netscape 4.0+, 
+# or Internet explorer 4.0+). Note that for large projects the tree generation 
+# can take a very long time. In such cases it is better to disable this feature. 
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW      = YES
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be 
+# used to set the initial width (in pixels) of the frame in which the tree 
+# is shown.
+
+TREEVIEW_WIDTH         = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will 
+# generate Latex output.
+
+GENERATE_LATEX         = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT           = latex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact 
+# LaTeX documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used 
+# by the printer. Possible values are: a4, a4wide, letter, legal and 
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE             = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX 
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES         = 
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for 
+# the generated latex document. The header should contain everything until 
+# the first chapter. If it is left blank doxygen will generate a 
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER           = 
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated 
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will 
+# contain links (just like the HTML output) instead of page references 
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS         = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of 
+# plain latex in the generated Makefile. Set this option to YES to get a 
+# higher quality PDF documentation.
+
+USE_PDFLATEX           = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. 
+# command to the generated LaTeX files. This will instruct LaTeX to keep 
+# running if errors occur, instead of asking the user for help. 
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE        = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output 
+# The RTF output is optimised for Word 97 and may not look very pretty with 
+# other RTF readers or editors.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact 
+# RTF documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated 
+# will contain hyperlink fields. The RTF file will 
+# contain links (just like the HTML output) instead of page references. 
+# This makes the output suitable for online browsing using WORD or other 
+# programs which support those fields. 
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's 
+# config file, i.e. a series of assignments. You only have to provide 
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE    = 
+
+# Set optional variables used in the generation of an rtf document. 
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE    = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will 
+# generate man pages
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to 
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION          = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output, 
+# then it will generate one additional man file for each entity 
+# documented in the real man page(s). These additional files 
+# only source the real man page, but without them the man command 
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will 
+# generate an XML file that captures the structure of 
+# the code including all documentation. Note that this 
+# feature is still experimental and incomplete at the 
+# moment.
+
+GENERATE_XML           = NO
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will 
+# generate an AutoGen Definitions (see autogen.sf.net) file 
+# that captures the structure of the code including all 
+# documentation. Note that this feature is still experimental 
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor   
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will 
+# evaluate all C-preprocessor directives found in the sources and include 
+# files.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro 
+# names in the source code. If set to NO (the default) only conditional 
+# compilation will be performed. Macro expansion can be done in a controlled 
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES 
+# then the macro expansion is limited to the macros specified with the 
+# PREDEFINED and EXPAND_AS_PREDEFINED tags.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files 
+# in the INCLUDE_PATH (see below) will be search if a #include is found.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that 
+# contain include files that are not input files but should be processed by 
+# the preprocessor.
+
+INCLUDE_PATH           = 
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard 
+# patterns (like *.h and *.hpp) to filter out the header-files in the 
+# directories. If left blank, the patterns specified with FILE_PATTERNS will 
+# be used.
+
+INCLUDE_FILE_PATTERNS  = 
+
+# The PREDEFINED tag can be used to specify one or more macro names that 
+# are defined before the preprocessor is started (similar to the -D option of 
+# gcc). The argument of the tag is a list of macros of the form: name 
+# or name=definition (no spaces). If the definition and the = are 
+# omitted =1 is assumed.
+
+PREDEFINED             =  DOXYGEN
+
+# If the MACRO_EXPANSION and EXPAND_PREDEF_ONLY tags are set to YES then 
+# this tag can be used to specify a list of macro names that should be expanded. 
+# The macro definition that is found in the sources will be used. 
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED      = 
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then 
+# doxygen's preprocessor will remove all function-like macros that are alone 
+# on a line and do not end with a semicolon. Such function macros are typically 
+# used for boiler-plate code, and will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references   
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tagfiles.
+
+TAGFILES               = 
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create 
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE       = 
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed 
+# in the class index. If set to NO only the inherited external classes 
+# will be listed.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed 
+# in the modules index. If set to NO, only the current project's groups will 
+# be listed.
+
+EXTERNAL_GROUPS        = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script 
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool   
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will 
+# generate an inheritance diagram (in Html, RTF and LaTeX) for classes with base or 
+# super classes. Setting the tag to NO turns the diagrams off. Note that this 
+# option is superseded by the HAVE_DOT option below. This is only a fallback. It is 
+# recommended to install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS         = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is 
+# available from the path. This tool is part of Graphviz, a graph visualization 
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section 
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT               = NO
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect inheritance relations. Setting this tag to YES will force the 
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect implementation dependencies (inheritance, containment, and 
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH    = YES
+
+# If set to YES, the inheritance and collaboration graphs will show the 
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS     = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide 
+# inheritance and usage relations if the target is undocumented 
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT 
+# tags are set to YES then doxygen will generate a graph for each documented 
+# file showing the direct and indirect include dependencies of the file with 
+# other documented files.
+
+INCLUDE_GRAPH          = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and 
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each 
+# documented header file showing the documented files that directly or 
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen 
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images 
+# generated by dot. Possible values are gif, jpg, and png
+# If left blank gif will be used.
+
+DOT_IMAGE_FORMAT       = gif
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be 
+# found. If left blank, it is assumed the dot tool can be found on the path.
+
+DOT_PATH               = 
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that 
+# contain dot files that are included in the documentation (see the 
+# \dotfile command).
+
+DOTFILE_DIRS           = 
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width 
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than 
+# this value, doxygen will try to truncate the graph, so that it fits within 
+# the specified constraint. Beware that most browsers cannot cope with very 
+# large images.
+
+MAX_DOT_GRAPH_WIDTH    = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height 
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than 
+# this value, doxygen will try to truncate the graph, so that it fits within 
+# the specified constraint. Beware that most browsers cannot cope with very 
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT   = 1024
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will 
+# generate a legend page explaining the meaning of the various boxes and 
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will 
+# remove the intermediate dot files that are used to generate 
+# the various graphs.
+
+DOT_CLEANUP            = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine   
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be 
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE           = NO

+ 82 - 0
libs/libs3/inc/error_parser.h

@@ -0,0 +1,82 @@
+/** **************************************************************************
+ * error_parser.h
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#ifndef ERROR_PARSER_H
+#define ERROR_PARSER_H
+
+#include "libs3.h"
+#include "simplexml.h"
+#include "string_buffer.h"
+
+
+#define EXTRA_DETAILS_SIZE 8
+
+// Accumulates the body of an S3 error response and parses it (as XML) into
+// an S3ErrorDetails structure that can be reported back to the caller.
+typedef struct ErrorParser
+{
+    // This is the S3ErrorDetails that this ErrorParser fills in from the
+    // data that it parses
+    S3ErrorDetails s3ErrorDetails;
+
+    // This is the error XML parser (SimpleXml) used on the response body
+    SimpleXml errorXmlParser;
+
+    // Set to 1 after the first call to add
+    int errorXmlParserInitialized;
+
+    // Used to buffer the S3 Error Code as it is read in
+    string_buffer(code, 1024);
+
+    // Used to buffer the S3 Error Message as it is read in
+    string_buffer(message, 1024);
+
+    // Used to buffer the S3 Error Resource as it is read in
+    string_buffer(resource, 1024);
+
+    // Used to buffer the S3 Error Further Details as it is read in
+    string_buffer(furtherDetails, 1024);
+    
+    // The extra (name, value) details; we support up to EXTRA_DETAILS_SIZE
+    // of them
+    S3NameValue extraDetails[EXTRA_DETAILS_SIZE];
+
+    // This is the buffer from which the names and values used in extraDetails
+    // are allocated
+    string_multibuffer(extraDetailsNamesValues, EXTRA_DETAILS_SIZE * 1024);
+} ErrorParser;
+
+
+// Prepares an ErrorParser for use.  Always call this first, before any
+// other error_parser_* function on the structure.
+void error_parser_initialize(ErrorParser *errorParser);
+
+// Feeds the next bufferSize bytes of the error response body to the parser.
+// NOTE(review): presumably returns a non-OK S3Status on parse failure --
+// confirm against the implementation in error_parser.c
+S3Status error_parser_add(ErrorParser *errorParser, char *buffer,
+                          int bufferSize);
+
+// NOTE(review): appears to translate the parsed error details into an
+// updated *status value -- confirm exact mapping in error_parser.c
+void error_parser_convert_status(ErrorParser *errorParser, S3Status *status);
+
+// Releases resources held by the parser.  Always call this when done with
+// the ErrorParser (it pairs with error_parser_initialize).
+void error_parser_deinitialize(ErrorParser *errorParser);
+
+
+#endif /* ERROR_PARSER_H */

+ 2528 - 0
libs/libs3/inc/libs3.h

@@ -0,0 +1,2528 @@
+/** **************************************************************************
+ * @file libs3.h
+ * @details
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ *
+ * This file is part of libs3.
+ *
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#ifndef LIBS3_H
+#define LIBS3_H
+
+#include <stdint.h>
+#include <sys/select.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/** **************************************************************************
+ * @mainpage
+ * Overview
+ * --------
+ *
+ * This library provides an API for using Amazon's S3 service (see
+ * http://s3.amazonaws.com).  Its design goals are:
+ *
+ * - To provide a simple and straightforward API for accessing all of S3's
+ *   functionality
+ * - To not require the developer using libs3 to need to know anything about:
+ *     - HTTP
+ *     - XML
+ *     - SSL
+ *
+ *   In other words, this API is meant to stand on its own, without requiring
+ *   any implicit knowledge of how S3 services are accessed using HTTP
+ *   protocols.
+ *
+ * - To be usable from multithreaded code
+ * - To be usable by code which wants to process multiple S3 requests
+ *   simultaneously from a single thread
+ * - To be usable in the simple, straightforward way using sequentialized
+ *   blocking requests
+ *
+ * The general usage pattern of libs3 is:
+ *
+ * - Initialize libs3 once per program by calling S3_initialize() at program
+ *   start up time
+ * - Make any number of requests to S3 for getting, putting, or listing
+ *   S3 buckets or objects, or modifying the ACLs associated with buckets
+ *   or objects, using one of three general approaches:
+ *   1. Simple blocking requests, one at a time
+ *   2. Multiple threads each making simple blocking requests
+ *   3. From a single thread, managing multiple S3 requests simultaneously
+ *      using file descriptors and a select()/poll() loop
+ * - Shut down libs3 at program exit time by calling S3_deinitialize()
+ *
+ * All functions which send requests to S3 return their results via a set of
+ * callback functions which must be supplied to libs3 at the time that the
+ * request is initiated.  libs3 will call these functions back in the thread
+ * calling the libs3 function if blocking requests are made (i.e., if the
+ * S3RequestContext for the function invocation is passed in as NULL).
+ * If an S3RequestContext is used to drive multiple S3 requests
+ * simultaneously, then the callbacks will be made from the thread which
+ * calls S3_runall_request_context() or S3_runonce_request_context(), or
+ * possibly from the thread which calls S3_destroy_request_context(), if
+ * S3 requests are in progress at the time that this function is called.
+ *
+ * NOTE: Response headers from Amazon S3 are limited to 4K (2K of metas is all
+ * that Amazon supports, and libs3 allows Amazon an additional 2K of headers).
+ *
+ * NOTE: Because HTTP and the S3 REST protocol are highly under-specified,
+ * libs3 must make some assumptions about the maximum length of certain HTTP
+ * elements (such as headers) that it will accept.  While efforts have been
+ * made to enforce maximums which are beyond that expected to be needed by any
+ * user of S3, it is always possible that these maximums may be too low in
+ * some rare circumstances.  Bug reports, should this unlikely situation
+ * occur, would be most appreciated.
+ *
+ * Threading Rules
+ * ---------------
+ *
+ * 1. All arguments passed to any function must not be modified directly until
+ *    the function returns.
+ * 2. All S3RequestContext and S3Request arguments passed to all functions may
+ *    not be passed to any other libs3 function by any other thread until the
+ *    function returns.
+ * 3. All functions may be called simultaneously by multiple threads as long
+ *    as (1) and (2) are observed, EXCEPT for S3_initialize(), which must be
+ *    called from one thread at a time only.
+ * 4. All callbacks will be made in the thread of the caller of the function
+ *    which invoked them, so the caller of all libs3 functions should not hold
+ *    locks that it would try to re-acquire in a callback, as this may
+ *    deadlock.
+ ************************************************************************** **/
+
+
+/** **************************************************************************
+ * Constants
+ ************************************************************************** **/
+
+/**
+ * S3_MAX_HOSTNAME_SIZE is the maximum size we allow for a host name
+ **/
+#define S3_MAX_HOSTNAME_SIZE               255
+
+/**
+ * This is the default hostname that is being used for the S3 requests
+ **/
+#define S3_DEFAULT_HOSTNAME                "s3.amazonaws.com"
+
+
+/**
+ * S3_MAX_BUCKET_NAME_SIZE is the maximum size of a bucket name.
+ **/
+
+#define S3_MAX_BUCKET_NAME_SIZE            255
+
+/**
+ * S3_MAX_KEY_SIZE is the maximum size of keys that Amazon S3 supports.
+ **/
+#define S3_MAX_KEY_SIZE                    1024
+
+
+/**
+ * S3_MAX_METADATA_SIZE is the maximum number of bytes allowed for
+ * x-amz-meta header names and values in any request passed to Amazon S3
+ **/
+#define S3_MAX_METADATA_SIZE               2048
+
+
+/**
+ * S3_METADATA_HEADER_NAME_PREFIX is the prefix of an S3 "meta header"
+ **/
+#define S3_METADATA_HEADER_NAME_PREFIX     "x-amz-meta-"
+
+
+/**
+ * S3_MAX_METADATA_COUNT is the maximum number of x-amz-meta- headers that
+ * could be included in a request to S3.  The smallest meta header is
+ * "x-amz-meta-n: v".  Since S3 doesn't count the ": " against the total, the
+ * smallest amount of data to count for a header would be the length of
+ * "x-amz-meta-nv".
+ **/
+#define S3_MAX_METADATA_COUNT \
+    (S3_MAX_METADATA_SIZE / (sizeof(S3_METADATA_HEADER_NAME_PREFIX "nv") - 1))
+
+
+/**
+ * S3_MAX_ACL_GRANT_COUNT is the maximum number of ACL grants that may be
+ * set on a bucket or object at one time.  It is also the maximum number of
+ * ACL grants that the XML ACL parsing routine will parse.
+ **/
+#define S3_MAX_ACL_GRANT_COUNT             100
+
+
+/**
+ * This is the maximum number of characters (including terminating \0) that
+ * libs3 supports in an ACL grantee email address.
+ **/
+#define S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE  128
+
+
+/**
+ * This is the maximum number of characters (including terminating \0) that
+ * libs3 supports in an ACL grantee user id.
+ **/
+#define S3_MAX_GRANTEE_USER_ID_SIZE        128
+
+
+/**
+ * This is the maximum number of characters (including terminating \0) that
+ * libs3 supports in an ACL grantee user display name.
+ **/
+#define S3_MAX_GRANTEE_DISPLAY_NAME_SIZE   128
+
+
+/**
+ * This is the maximum number of characters that will be stored in the
+ * return buffer for the utility function which computes an HTTP authenticated
+ * query string
+ **/
+#define S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE \
+    (sizeof("https:///") + S3_MAX_HOSTNAME_SIZE + (S3_MAX_KEY_SIZE * 3) + \
+     sizeof("?AWSAccessKeyId=") + 32 + sizeof("&Expires=") + 32 + \
+     sizeof("&Signature=") + 28 + 1)
+
+
+/**
+ * This constant is used by the S3_initialize() function, to specify that
+ * the winsock library should be initialized by libs3; only relevant on
+ * Microsoft Windows platforms.
+ **/
+#define S3_INIT_WINSOCK                    1
+/**
+ * This constant is used by the S3_initialize() function, to enable peer SSL
+ * certificate verification by default for all requests.  If not set in the
+ * flags passed to S3_initialize(), then the SSL certificate of the peer
+ * will NOT be verified by default (but can still be enabled on a per request
+ * basis by calling S3_set_request_context_verify_peer).
+ */
+#define S3_INIT_VERIFY_PEER                2
+
+
+/**
+ * This convenience constant is used by the S3_initialize() function to
+ * indicate that all libraries required by libs3 should be initialized.
+ **/
+#define S3_INIT_ALL                        (S3_INIT_WINSOCK)
+
+
+/**
+ * The default region identifier used to scope the signing key
+ */
+#define S3_DEFAULT_REGION                  "us-east-1"
+
+
+/** **************************************************************************
+ * Enumerations
+ ************************************************************************** **/
+
+/**
+ * S3Status is a status code as returned by a libs3 function.  The meaning of
+ * each status code is defined in the comments for each function which returns
+ * that status.
+ **/
+typedef enum
+{
+    S3StatusOK                                              ,
+
+    /**
+     * Errors that prevent the S3 request from being issued or response from
+     * being read
+     **/
+    S3StatusInternalError                                   ,
+    S3StatusOutOfMemory                                     ,
+    S3StatusInterrupted                                     ,
+    S3StatusInvalidBucketNameTooLong                        ,
+    S3StatusInvalidBucketNameFirstCharacter                 ,
+    S3StatusInvalidBucketNameCharacter                      ,
+    S3StatusInvalidBucketNameCharacterSequence              ,
+    S3StatusInvalidBucketNameTooShort                       ,
+    S3StatusInvalidBucketNameDotQuadNotation                ,
+    S3StatusQueryParamsTooLong                              ,
+    S3StatusFailedToInitializeRequest                       ,
+    S3StatusMetaDataHeadersTooLong                          ,
+    S3StatusBadMetaData                                     ,
+    S3StatusBadContentType                                  ,
+    S3StatusContentTypeTooLong                              ,
+    S3StatusBadMD5                                          ,
+    S3StatusMD5TooLong                                      ,
+    S3StatusBadCacheControl                                 ,
+    S3StatusCacheControlTooLong                             ,
+    S3StatusBadContentDispositionFilename                   ,
+    S3StatusContentDispositionFilenameTooLong               ,
+    S3StatusBadContentEncoding                              ,
+    S3StatusContentEncodingTooLong                          ,
+    S3StatusBadIfMatchETag                                  ,
+    S3StatusIfMatchETagTooLong                              ,
+    S3StatusBadIfNotMatchETag                               ,
+    S3StatusIfNotMatchETagTooLong                           ,
+    S3StatusHeadersTooLong                                  ,
+    S3StatusKeyTooLong                                      ,
+    S3StatusUriTooLong                                      ,
+    S3StatusXmlParseFailure                                 ,
+    S3StatusEmailAddressTooLong                             ,
+    S3StatusUserIdTooLong                                   ,
+    S3StatusUserDisplayNameTooLong                          ,
+    S3StatusGroupUriTooLong                                 ,
+    S3StatusPermissionTooLong                               ,
+    S3StatusTargetBucketTooLong                             ,
+    S3StatusTargetPrefixTooLong                             ,
+    S3StatusTooManyGrants                                   ,
+    S3StatusBadGrantee                                      ,
+    S3StatusBadPermission                                   ,
+    S3StatusXmlDocumentTooLarge                             ,
+    S3StatusNameLookupError                                 ,
+    S3StatusFailedToConnect                                 ,
+    S3StatusServerFailedVerification                        ,
+    S3StatusConnectionFailed                                ,
+    S3StatusAbortedByCallback                               ,
+    S3StatusNotSupported                                    ,
+
+    /**
+     * Errors from the S3 service
+     **/
+    S3StatusErrorAccessDenied                               ,
+    S3StatusErrorAccountProblem                             ,
+    S3StatusErrorAmbiguousGrantByEmailAddress               ,
+    S3StatusErrorBadDigest                                  ,
+    S3StatusErrorBucketAlreadyExists                        ,
+    S3StatusErrorBucketAlreadyOwnedByYou                    ,
+    S3StatusErrorBucketNotEmpty                             ,
+    S3StatusErrorCredentialsNotSupported                    ,
+    S3StatusErrorCrossLocationLoggingProhibited             ,
+    S3StatusErrorEntityTooSmall                             ,
+    S3StatusErrorEntityTooLarge                             ,
+    S3StatusErrorExpiredToken                               ,
+    S3StatusErrorIllegalVersioningConfigurationException    ,
+    S3StatusErrorIncompleteBody                             ,
+    S3StatusErrorIncorrectNumberOfFilesInPostRequest        ,
+    S3StatusErrorInlineDataTooLarge                         ,
+    S3StatusErrorInternalError                              ,
+    S3StatusErrorInvalidAccessKeyId                         ,
+    S3StatusErrorInvalidAddressingHeader                    ,
+    S3StatusErrorInvalidArgument                            ,
+    S3StatusErrorInvalidBucketName                          ,
+    S3StatusErrorInvalidBucketState                         ,
+    S3StatusErrorInvalidDigest                              ,
+    S3StatusErrorInvalidEncryptionAlgorithmError            ,
+    S3StatusErrorInvalidLocationConstraint                  ,
+    S3StatusErrorInvalidObjectState                         ,
+    S3StatusErrorInvalidPart                                ,
+    S3StatusErrorInvalidPartOrder                           ,
+    S3StatusErrorInvalidPayer                               ,
+    S3StatusErrorInvalidPolicyDocument                      ,
+    S3StatusErrorInvalidRange                               ,
+    S3StatusErrorInvalidRequest                             ,
+    S3StatusErrorInvalidSecurity                            ,
+    S3StatusErrorInvalidSOAPRequest                         ,
+    S3StatusErrorInvalidStorageClass                        ,
+    S3StatusErrorInvalidTargetBucketForLogging              ,
+    S3StatusErrorInvalidToken                               ,
+    S3StatusErrorInvalidURI                                 ,
+    S3StatusErrorKeyTooLong                                 ,
+    S3StatusErrorMalformedACLError                          ,
+    S3StatusErrorMalformedPOSTRequest                       ,
+    S3StatusErrorMalformedXML                               ,
+    S3StatusErrorMaxMessageLengthExceeded                   ,
+    S3StatusErrorMaxPostPreDataLengthExceededError          ,
+    S3StatusErrorMetadataTooLarge                           ,
+    S3StatusErrorMethodNotAllowed                           ,
+    S3StatusErrorMissingAttachment                          ,
+    S3StatusErrorMissingContentLength                       ,
+    S3StatusErrorMissingRequestBodyError                    ,
+    S3StatusErrorMissingSecurityElement                     ,
+    S3StatusErrorMissingSecurityHeader                      ,
+    S3StatusErrorNoLoggingStatusForKey                      ,
+    S3StatusErrorNoSuchBucket                               ,
+    S3StatusErrorNoSuchKey                                  ,
+    S3StatusErrorNoSuchLifecycleConfiguration               ,
+    S3StatusErrorNoSuchUpload                               ,
+    S3StatusErrorNoSuchVersion                              ,
+    S3StatusErrorNotImplemented                             ,
+    S3StatusErrorNotSignedUp                                ,
+    S3StatusErrorNoSuchBucketPolicy                         ,
+    S3StatusErrorOperationAborted                           ,
+    S3StatusErrorPermanentRedirect                          ,
+    S3StatusErrorPreconditionFailed                         ,
+    S3StatusErrorRedirect                                   ,
+    S3StatusErrorRestoreAlreadyInProgress                   ,
+    S3StatusErrorRequestIsNotMultiPartContent               ,
+    S3StatusErrorRequestTimeout                             ,
+    S3StatusErrorRequestTimeTooSkewed                       ,
+    S3StatusErrorRequestTorrentOfBucketError                ,
+    S3StatusErrorSignatureDoesNotMatch                      ,
+    S3StatusErrorServiceUnavailable                         ,
+    S3StatusErrorSlowDown                                   ,
+    S3StatusErrorTemporaryRedirect                          ,
+    S3StatusErrorTokenRefreshRequired                       ,
+    S3StatusErrorTooManyBuckets                             ,
+    S3StatusErrorUnexpectedContent                          ,
+    S3StatusErrorUnresolvableGrantByEmailAddress            ,
+    S3StatusErrorUserKeyMustBeSpecified                     ,
+    S3StatusErrorQuotaExceeded                              ,
+    S3StatusErrorUnknown                                    ,
+
+    /**
+     * The following are HTTP errors returned by S3 without enough detail to
+     * distinguish any of the above S3StatusError conditions
+     **/
+    S3StatusHttpErrorMovedTemporarily                       ,
+    S3StatusHttpErrorBadRequest                             ,
+    S3StatusHttpErrorForbidden                              ,
+    S3StatusHttpErrorNotFound                               ,
+    S3StatusHttpErrorConflict                               ,
+    S3StatusHttpErrorUnknown
+} S3Status;
+
+
+/**
+ * S3Protocol represents a protocol that may be used for communicating a
+ * request to the Amazon S3 service.
+ *
+ * In general, HTTPS is greatly preferred (and should be the default of any
+ * application using libs3) because it protects any data being sent to or
+ * from S3 using strong encryption.  However, HTTPS is much more CPU intensive
+ * than HTTP, and if the caller is absolutely certain that it is OK for the
+ * data to be viewable by anyone in transit, then HTTP can be used.
+ **/
+typedef enum
+{
+    S3ProtocolHTTPS                     = 0,
+    S3ProtocolHTTP                      = 1
+} S3Protocol;
+
+
+/**
+ * S3UriStyle defines the form that an Amazon S3 URI identifying a bucket or
+ * object can take.  They are of these forms:
+ *
+ * Virtual Host: ${protocol}://${bucket}.s3.amazonaws.com/[${key}]
+ * Path: ${protocol}://s3.amazonaws.com/${bucket}/[${key}]
+ *
+ * It is generally better to use the Virtual Host URI form, because it ensures
+ * that the bucket name used is compatible with normal HTTP GETs and POSTs of
+ * data to/from the bucket.  However, if DNS lookups for the bucket are too
+ * slow or unreliable for some reason, Path URI form may be used.
+ **/
+typedef enum
+{
+    S3UriStyleVirtualHost               = 0,
+    S3UriStylePath                      = 1
+} S3UriStyle;
+
+
+/**
+ * S3GranteeType defines the type of Grantee used in an S3 ACL Grant.
+ * Amazon Customer By Email - identifies the Grantee using their Amazon S3
+ *     account email address
+ * Canonical User - identifies the Grantee by S3 User ID and Display Name,
+ *     which can only be obtained by making requests to S3, for example, by
+ *     listing owned buckets
+ * All AWS Users - identifies all authenticated AWS users
+ * All Users - identifies all users
+ * Log Delivery - identifies the Amazon group responsible for writing
+ *                server access logs into buckets
+ **/
+typedef enum
+{
+    S3GranteeTypeAmazonCustomerByEmail  = 0,
+    S3GranteeTypeCanonicalUser          = 1,
+    S3GranteeTypeAllAwsUsers            = 2,
+    S3GranteeTypeAllUsers               = 3,
+    S3GranteeTypeLogDelivery            = 4
+} S3GranteeType;
+
+
+/**
+ * This is an individual permission granted to a grantee in an S3 ACL Grant.
+ * Read permission gives the Grantee the permission to list the bucket, or
+ *     read the object or its metadata
+ * Write permission gives the Grantee the permission to create, overwrite, or
+ *     delete any object in the bucket, and is not supported for objects
+ * ReadACP permission gives the Grantee the permission to read the ACP for
+ *     the bucket or object; the owner of the bucket or object always has
+ *     this permission implicitly
+ * WriteACP permission gives the Grantee the permission to overwrite the ACP
+ *     for the bucket or object; the owner of the bucket or object always has
+ *     this permission implicitly
+ * FullControl permission gives the Grantee all permissions specified by the
+ *     Read, Write, ReadACP, and WriteACP permissions
+ **/
+typedef enum
+{
+    S3PermissionRead                    = 0,
+    S3PermissionWrite                   = 1,
+    S3PermissionReadACP                 = 2,
+    S3PermissionWriteACP                = 3,
+    S3PermissionFullControl             = 4
+} S3Permission;
+
+
+/**
+ * S3CannedAcl is an ACL that can be specified when an object is created or
+ * updated.  Each canned ACL has a predefined value when expanded to a full
+ * set of S3 ACL Grants.
+ * Private canned ACL gives the owner FULL_CONTROL and no other permissions
+ *     are issued
+ * Public Read canned ACL gives the owner FULL_CONTROL and all users Read
+ *     permission
+ * Public Read Write canned ACL gives the owner FULL_CONTROL and all users
+ *     Read and Write permission
+ * AuthenticatedRead canned ACL gives the owner FULL_CONTROL and authenticated
+ *     S3 users Read permission
+ **/
+typedef enum
+{
+    S3CannedAclPrivate                  = 0, /* private */
+    S3CannedAclPublicRead               = 1, /* public-read */
+    S3CannedAclPublicReadWrite          = 2, /* public-read-write */
+    S3CannedAclAuthenticatedRead        = 3  /* authenticated-read */
+} S3CannedAcl;
+
+
+/** **************************************************************************
+ * Data Types
+ ************************************************************************** **/
+
+/**
+ * An S3RequestContext manages multiple S3 requests simultaneously; see the
+ * S3_XXX_request_context functions below for details
+ **/
+typedef struct S3RequestContext S3RequestContext;
+
+
+/**
+ * S3NameValue represents a single Name - Value pair, used to represent either
+ * S3 metadata associated with a key, or S3 error details.
+ **/
+typedef struct S3NameValue
+{
+    /**
+     * The name part of the Name - Value pair
+     **/
+    const char *name;
+
+    /**
+     * The value part of the Name - Value pair
+     **/
+    const char *value;
+} S3NameValue;
+
+
+/**
+ * S3ResponseProperties is passed to the properties callback function which is
+ * called when the complete response properties have been received.  Some of
+ * the fields of this structure are optional and may not be provided in the
+ * response, and some will always be provided in the response.
+ **/
+typedef struct S3ResponseProperties
+{
+    /**
+     * This optional field identifies the request ID and may be used when
+     * reporting problems to Amazon.
+     **/
+    const char *requestId;
+
+    /**
+     * This optional field identifies the request ID and may be used when
+     * reporting problems to Amazon.
+     **/
+    const char *requestId2;
+
+    /**
+     * This optional field is the content type of the data which is returned
+     * by the request.  If not provided, the default can be assumed to be
+     * "binary/octet-stream".
+     **/
+    const char *contentType;
+
+    /**
+     * This optional field is the content length of the data which is returned
+     * in the response.  A negative value means that this value was not
+     * provided in the response.  A value of 0 means that there is no content
+     * provided.  A positive value gives the number of bytes in the content of
+     * the response.
+     **/
+    uint64_t contentLength;
+
+    /**
+     * This optional field names the server which serviced the request.
+     **/
+    const char *server;
+
+    /**
+     * This optional field provides a string identifying the unique contents
+     * of the resource identified by the request, such that the contents can
+     * be assumed not to be changed if the same eTag is returned at a later
+     * time describing the same resource.  This is an MD5 sum of the contents.
+     **/
+    const char *eTag;
+
+    /**
+     * This optional field provides the last modified time, relative to the
+     * Unix epoch, of the contents.  If this value is < 0, then the last
+     * modified time was not provided in the response.  If this value is >= 0,
+     * then the last modified date of the contents are available as a number
+     * of seconds since the UNIX epoch.
+     *
+     **/
+    int64_t lastModified;
+
+    /**
+     * This is the number of user-provided meta data associated with the
+     * resource.
+     **/
+    int metaDataCount;
+
+    /**
+     * These are the meta data associated with the resource.  In each case,
+     * the name will not include any S3-specific header prefixes
+     * (i.e. x-amz-meta- will have been removed from the beginning), and
+     * leading and trailing whitespace will have been stripped from the value.
+     **/
+    const S3NameValue *metaData;
+
+    /**
+     * This optional field provides an indication of whether or not
+     * server-side encryption was used for the object.  This field is only
+     * meaningful if the request was an object put, copy, get, or head
+     * request.
+     * If this value is 0, then server-side encryption is not in effect for
+     * the object (or the request was one for which server-side encryption is
+     * not a meaningful value); if this value is non-zero, then server-side
+     * encryption is in effect for the object.
+     **/
+    char usesServerSideEncryption;
+} S3ResponseProperties;
+
+
+/**
+ * S3AclGrant identifies a single grant in the ACL for a bucket or object.  An
+ * ACL is composed of any number of grants, which specify a grantee and the
+ * permissions given to that grantee.  S3 does not normalize ACLs in any way,
+ * so a redundant ACL specification will lead to a redundant ACL stored in S3.
+ **/
+typedef struct S3AclGrant
+{
+    /**
+     * The granteeType gives the type of grantee specified by this grant.
+     **/
+    S3GranteeType granteeType;
+    /**
+     * The identifier of the grantee that is set is determined by the
+     * granteeType:
+     *
+     * S3GranteeTypeAmazonCustomerByEmail - amazonCustomerByEmail.emailAddress
+     * S3GranteeTypeCanonicalUser - canonicalUser.id, canonicalUser.displayName
+     * S3GranteeTypeAllAwsUsers - none
+     * S3GranteeTypeAllUsers - none
+     **/
+    union
+    {
+        /**
+         * This structure is used iff the granteeType is
+         * S3GranteeTypeAmazonCustomerByEmail.
+         **/
+        struct
+        {
+            /**
+             * This is the email address of the Amazon Customer being granted
+             * permissions by this S3AclGrant.
+             **/
+            char emailAddress[S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE];
+        } amazonCustomerByEmail;
+        /**
+         * This structure is used iff the granteeType is
+         * S3GranteeTypeCanonicalUser.
+         **/
+        struct
+        {
+            /**
+             * This is the CanonicalUser ID of the grantee
+             **/
+            char id[S3_MAX_GRANTEE_USER_ID_SIZE];
+            /**
+             * This is the display name of the grantee
+             **/
+            char displayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
+        } canonicalUser;
+    } grantee;
+    /**
+     * This is the S3Permission to be granted to the grantee
+     **/
+    S3Permission permission;
+} S3AclGrant;
+
+
+/**
+ * A context for working with objects within a bucket.  A bucket context holds
+ * all information necessary for working with a bucket, and may be used
+ * repeatedly over many consecutive (or simultaneous) calls into libs3 bucket
+ * operation functions.
+ **/
+typedef struct S3BucketContext
+{
+    /**
+     * The name of the host to connect to when making S3 requests.  If set to
+     * NULL, the default S3 hostname passed in to S3_initialize will be used.
+     **/
+    const char *hostName;
+
+    /**
+     * The name of the bucket to use in the bucket context
+     **/
+    const char *bucketName;
+
+    /**
+     * The protocol to use when accessing the bucket
+     **/
+    S3Protocol protocol;
+
+    /**
+     * The URI style to use for all URIs sent to Amazon S3 while working with
+     * this bucket context
+     **/
+    S3UriStyle uriStyle;
+
+    /**
+     * The Amazon Access Key ID to use for access to the bucket
+     **/
+    const char *accessKeyId;
+
+    /**
+     *  The Amazon Secret Access Key to use for access to the bucket
+     **/
+    const char *secretAccessKey;
+
+    /**
+     *  The Amazon Security Token used to generate Temporary Security Credentials
+     **/
+    const char *securityToken;
+
+    /**
+     * The AWS region to which to scope the signing key used for authorization.
+     * If NULL, the default region ("us-east-1") will be used.
+     */
+    const char *authRegion;
+} S3BucketContext;
+
+
+/**
+ * This is a single entry supplied to the list bucket callback by a call to
+ * S3_list_bucket.  It identifies a single matching key from the list
+ * operation.
+ **/
+typedef struct S3ListBucketContent
+{
+    /**
+     * This is the next key in the list bucket results.
+     **/
+    const char *key;
+
+    /**
+     * This is the number of seconds since UNIX epoch of the last modified
+     * date of the object identified by the key.
+     **/
+    int64_t lastModified;
+
+    /**
+     * This gives a tag which gives a signature of the contents of the object,
+     * which is the MD5 of the contents of the object.
+     **/
+    const char *eTag;
+
+    /**
+     * This is the size of the object in bytes.
+     **/
+    uint64_t size;
+
+    /**
+     * This is the ID of the owner of the key; it is present only if access
+     * permissions allow it to be viewed.
+     **/
+    const char *ownerId;
+
+    /**
+     * This is the display name of the owner of the key; it is present only if
+     * access permissions allow it to be viewed.
+     **/
+    const char *ownerDisplayName;
+} S3ListBucketContent;
+
+
+/**
+ * This is a single entry supplied to the list bucket callback by a call to
+ * S3_list_bucket.  It identifies a single matching key from the list
+ * operation.
+ **/
+typedef struct S3ListMultipartUpload
+{
+    /**
+     * This is the next key in the list bucket results.
+     **/
+    const char *key;
+
+    const char *uploadId;
+    const char *initiatorId;
+    const char *initiatorDisplayName;
+
+    /**
+     * This is the ID of the owner of the key; it is present only if access
+     * permissions allow it to be viewed.
+     **/
+    const char *ownerId;
+
+    /**
+     * This is the display name of the owner of the key; it is present only if
+     * access permissions allow it to be viewed.
+     **/
+    const char *ownerDisplayName;
+
+    const char *storageClass;
+
+    /**
+     * This is the number of seconds since UNIX epoch of the last modified
+     * date of the object identified by the key.
+     **/
+    int64_t initiated;
+
+} S3ListMultipartUpload;
+
+
+/**
+ * This is a single entry supplied to the list parts callback by a call to
+ * S3_list_parts.  It describes a single part that has been uploaded as part
+ * of a multipart upload.
+ **/
+typedef struct S3ListPart
+{
+    /**
+     * This gives a tag which gives a signature of the contents of the part,
+     * which is the MD5 of the contents of the part.
+     **/
+    const char *eTag;
+
+    /**
+     * This is the number of seconds since UNIX epoch of the last modified
+     * date of this part.
+     **/
+    int64_t lastModified;
+    /** The part number identifying this part within its multipart upload. **/
+    uint64_t partNumber;
+    /** This is the size of the part in bytes. **/
+    uint64_t size;
+} S3ListPart;
+
+
+/**
+ * S3PutProperties is the set of properties that may optionally be set by the
+ * user when putting objects to S3.  Each field of this structure is optional
+ * and may or may not be present.
+ **/
+typedef struct S3PutProperties
+{
+    /**
+     * If present, this is the Content-Type that should be associated with the
+     * object.  If not provided, S3 defaults to "binary/octet-stream".
+     **/
+    const char *contentType;
+
+    /**
+     * If present, this provides the MD5 signature of the contents, and is
+     * used to validate the contents.  This is highly recommended by Amazon
+     * but not required.  Its format is as a base64-encoded MD5 sum.
+     **/
+    const char *md5;
+
+    /**
+     * If present, this gives a Cache-Control header string to be supplied to
+     * HTTP clients which download this object.
+     **/
+    const char *cacheControl;
+
+    /**
+     * If present, this gives the filename to save the downloaded file to,
+     * whenever the object is downloaded via a web browser.  This is only
+     * relevant for objects which are intended to be shared to users via web
+     * browsers and which is additionally intended to be downloaded rather
+     * than viewed.
+     **/
+    const char *contentDispositionFilename;
+
+    /**
+     * If present, this identifies the content encoding of the object.  This
+     * is only applicable to encoded (usually, compressed) content, and only
+     * relevant if the object is intended to be downloaded via a browser.
+     **/
+    const char *contentEncoding;
+
+    /**
+     * If >= 0, this gives an expiration date for the content.  This
+     * information is typically only delivered to users who download the
+     * content via a web browser.
+     **/
+    int64_t expires;
+
+    /**
+     * This identifies the "canned ACL" that should be used for this object.
+     * The default (0) gives only the owner of the object access to it.
+     **/
+    S3CannedAcl cannedAcl;
+
+    /**
+     * This is the number of values in the metaData field.
+     **/
+    int metaDataCount;
+
+    /**
+     * These are the meta data to pass to S3.  In each case, the name part of
+     * the Name - Value pair should not include any special S3 HTTP header
+     * prefix (i.e., should be of the form 'foo', NOT 'x-amz-meta-foo').
+     **/
+    const S3NameValue *metaData;
+
+    /**
+     * This a boolean value indicating whether or not the object should be
+     * stored by Amazon S3 using server-side encryption, wherein the data is
+     * encrypted by Amazon before being stored on permanent medium.
+     * Server-side encryption does not affect the data as it is sent to or
+     * received by Amazon, the encryption is applied by Amazon when objects
+     * are put and then decryption is applied when the objects are read by
+     * clients.
+     * If this value is 0, then server-side encryption is not used; if this
+     * value is non-zero, then server-side encryption is used.  Note that the
+     * encryption status of the object can be checked by ensuring that the put
+     * response has the usesServerSideEncryption flag set.
+     **/
+    char useServerSideEncryption;
+} S3PutProperties;
+
+
+/**
+ * S3GetConditions is used for the get_object operation, and specifies
+ * conditions which the object must meet in order to be successfully returned.
+ **/
+typedef struct S3GetConditions
+{
+    /**
+     * The request will be processed if the Last-Modification header of the
+     * object is greater than or equal to this value, specified as a number of
+     * seconds since Unix epoch.  If this value is less than zero, it will not
+     * be used in the conditional.
+     **/
+    int64_t ifModifiedSince;
+
+    /**
+     * The request will be processed if the Last-Modification header of the
+     * object is less than this value, specified as a number of seconds since
+     * Unix epoch.  If this value is less than zero, it will not be used in
+     * the conditional.
+     **/
+    int64_t ifNotModifiedSince;
+
+    /**
+     * If non-NULL, this gives an eTag header value which the object must
+     * match in order to be returned.  Note that although the eTag is simply
+     * an MD5, this must be presented in the S3 eTag form, which typically
+     * includes double-quotes.
+     **/
+    const char *ifMatchETag;
+
+    /**
+     * If non-NULL, this gives an eTag header value which the object must not
+     * match in order to be returned.  Note that although the eTag is simply
+     * an MD5, this must be presented in the S3 eTag form, which typically
+     * includes double-quotes.
+     **/
+    const char *ifNotMatchETag;
+} S3GetConditions;
+
+
+/**
+ * S3ErrorDetails provides detailed information describing an S3 error.  This
+ * is only presented when the error is an S3-generated error (i.e. one of the
+ * S3StatusErrorXXX values).
+ **/
+typedef struct S3ErrorDetails
+{
+    /**
+     * This is the human-readable message that Amazon supplied describing the
+     * error
+     **/
+    const char *message;
+
+    /**
+     * This identifies the resource for which the error occurred
+     **/
+    const char *resource;
+
+    /**
+     * This gives human-readable further details describing the specifics of
+     * this error
+     **/
+    const char *furtherDetails;
+
+    /**
+     * This gives the number of S3NameValue pairs present in the extraDetails
+     * array
+     **/
+    int extraDetailsCount;
+
+    /**
+     * S3 can provide extra details in a freeform Name - Value pair format.
+     * Each error can have any number of these, and this array provides these
+     * additional extra details.
+     **/
+    S3NameValue *extraDetails;
+} S3ErrorDetails;
+
+
+/** **************************************************************************
+ * Callback Signatures
+ ************************************************************************** **/
+
+/**
+ * This callback is made whenever the response properties become available for
+ * any request.
+ *
+ * @param properties are the properties that are available from the response
+ * @param callbackData is the callback data as specified when the request
+ *        was issued.
+ * @return S3StatusOK to continue processing the request, anything else to
+ *         immediately abort the request with a status which will be
+ *         passed to the S3ResponseCompleteCallback for this request.
+ *         Typically, this will return either S3StatusOK or
+ *         S3StatusAbortedByCallback.
+ **/
+typedef S3Status (S3ResponsePropertiesCallback)
+    (const S3ResponseProperties *properties, void *callbackData);
+
+
+/**
+ * This callback is made when the response has been completely received, or an
+ * error has occurred which has prematurely aborted the request, or one of the
+ * other user-supplied callbacks returned a value intended to abort the
+ * request.  This callback is always made for every request, as the very last
+ * callback made for that request.
+ *
+ * @param status gives the overall status of the response, indicating success
+ *        or failure; use S3_status_is_retryable() as a simple way to detect
+ *        whether or not the status indicates that the request failed but may
+ *        be retried.
+ * @param errorDetails if non-NULL, gives details as returned by the S3
+ *        service, describing the error
+ * @param callbackData is the callback data as specified when the request
+ *        was issued.
+ **/
+typedef void (S3ResponseCompleteCallback)(S3Status status,
+                                          const S3ErrorDetails *errorDetails,
+                                          void *callbackData);
+
+
+/**
+ * This callback is made for each bucket resulting from a list service
+ * operation.
+ *
+ * @param ownerId is the ID of the owner of the bucket
+ * @param ownerDisplayName is the owner display name of the owner of the bucket
+ * @param bucketName is the name of the bucket
+ * @param creationDateSeconds if < 0 indicates that no creation date was
+ *        supplied for the bucket; if >= 0 indicates the number of seconds
+ *        since UNIX Epoch of the creation date of the bucket
+ * @param callbackData is the callback data as specified when the request
+ *        was issued.
+ * @return S3StatusOK to continue processing the request, anything else to
+ *         immediately abort the request with a status which will be
+ *         passed to the S3ResponseCompleteCallback for this request.
+ *         Typically, this will return either S3StatusOK or
+ *         S3StatusAbortedByCallback.
+ **/
+typedef S3Status (S3ListServiceCallback)(const char *ownerId,
+                                         const char *ownerDisplayName,
+                                         const char *bucketName,
+                                         int64_t creationDateSeconds,
+                                         void *callbackData);
+
+
+/**
+ * This callback is made repeatedly as a list bucket operation progresses.
+ * The contents reported via this callback are only reported once per list
+ * bucket operation, but multiple calls to this callback may be necessary to
+ * report all items resulting from the list bucket operation.
+ *
+ * @param isTruncated is true if the list bucket request was truncated by the
+ *        S3 service, in which case the remainder of the list may be obtained
+ *        by querying again using the Marker parameter to start the query
+ *        after this set of results
+ * @param nextMarker if present, gives the largest (alphabetically) key
+ *        returned in the response, which, if isTruncated is true, may be used
+ *        as the marker in a subsequent list buckets operation to continue
+ *        listing
+ * @param contentsCount is the number of ListBucketContent structures in the
+ *        contents parameter
+ * @param contents is an array of ListBucketContent structures, each one
+ *        describing an object in the bucket
+ * @param commonPrefixesCount is the number of common prefixes strings in the
+ *        commonPrefixes parameter
+ * @param commonPrefixes is an array of strings, each specifying one of the
+ *        common prefixes as returned by S3
+ * @param callbackData is the callback data as specified when the request
+ *        was issued.
+ * @return S3StatusOK to continue processing the request, anything else to
+ *         immediately abort the request with a status which will be
+ *         passed to the S3ResponseCompleteCallback for this request.
+ *         Typically, this will return either S3StatusOK or
+ *         S3StatusAbortedByCallback.
+ **/
+typedef S3Status (S3ListBucketCallback)(int isTruncated,
+                                        const char *nextMarker,
+                                        int contentsCount,
+                                        const S3ListBucketContent *contents,
+                                        int commonPrefixesCount,
+                                        const char **commonPrefixes,
+                                        void *callbackData);
+
+
+/**
+ * This callback is made during a put object operation, to obtain the next
+ * chunk of data to put to the S3 service as the contents of the object.  This
+ * callback is made repeatedly, each time acquiring the next chunk of data to
+ * write to the service, until a negative or 0 value is returned.
+ *
+ * @param bufferSize gives the maximum number of bytes that may be written
+ *        into the buffer parameter by this callback
+ * @param buffer gives the buffer to fill with at most bufferSize bytes of
+ *        data as the next chunk of data to send to S3 as the contents of this
+ *        object
+ * @param callbackData is the callback data as specified when the request
+ *        was issued.
+ * @return < 0 to abort the request with the S3StatusAbortedByCallback, which
+ *        will be passed to the response complete callback for this request, or
+ *        0 to indicate the end of data, or > 0 to identify the number of
+ *        bytes that were written into the buffer by this callback
+ **/
+typedef int (S3PutObjectDataCallback)(int bufferSize, char *buffer,
+                                      void *callbackData);
+
+
+/**
+ * This callback is made during a get object operation, to provide the next
+ * chunk of data available from the S3 service constituting the contents of
+ * the object being fetched.  This callback is made repeatedly, each time
+ * providing the next chunk of data read, until the complete object contents
+ * have been passed through the callback in this way, or the callback
+ * returns an error status.
+ *
+ * @param bufferSize gives the number of bytes in buffer
+ * @param buffer is the data being passed into the callback
+ * @param callbackData is the callback data as specified when the request
+ *        was issued.
+ * @return S3StatusOK to continue processing the request, anything else to
+ *         immediately abort the request with a status which will be
+ *         passed to the S3ResponseCompleteCallback for this request.
+ *         Typically, this will return either S3StatusOK or
+ *         S3StatusAbortedByCallback.
+ **/
+typedef S3Status (S3GetObjectDataCallback)(int bufferSize, const char *buffer,
+                                           void *callbackData);
+
+
+/**
+ * This callback is made after initiation of a multipart upload operation.
+ * It indicates that the multipart upload has been created and provides the
+ * id that can be used to associate subsequently uploaded parts with the
+ * multipart upload operation
+ *
+ * @param upload_id is the unique identifier of this multipart upload
+ *        operation, to be used in calls to S3_upload_part and
+ *        S3_complete_multipart_upload
+ * @param callbackData is the callback data as specified when the request
+ *        was issued.
+ * @return S3StatusOK to continue processing the request, anything else to
+ *         immediately abort the request with a status which will be
+ *         passed to the S3ResponseCompleteCallback for this request.
+ *         Typically, this will return either S3StatusOK or
+ *         S3StatusAbortedByCallback.
+ **/
+typedef S3Status (S3MultipartInitialResponseCallback)(const char *upload_id,
+                                                      void *callbackData);
+
+
+/**
+ * This callback is made with the results of a "list multipart uploads"
+ * request, reporting the multipart uploads in progress for a bucket.
+ *
+ * @param isTruncated is true if the response was truncated by the S3
+ *        service, in which case the remainder of the list may be obtained
+ *        by querying again using nextKeyMarker and nextUploadIdMarker
+ * @param nextKeyMarker if present, gives the key to use as the marker in a
+ *        subsequent request to continue listing
+ * @param nextUploadIdMarker if present, gives the upload ID to use as the
+ *        marker in a subsequent request to continue listing
+ * @param uploadsCount is the number of elements in the [uploads] array
+ * @param uploads is an array of S3ListMultipartUpload structures, one per
+ *        in-progress multipart upload
+ * @param commonPrefixesCount is the number of elements in the
+ *        [commonPrefixes] array
+ * @param commonPrefixes is an array of strings, each specifying one of the
+ *        common prefixes as returned by S3
+ * @param callbackData is the callback data as specified when the request
+ *        was issued.
+ * @return S3StatusOK to continue processing the request, anything else to
+ *         immediately abort the request with a status which will be
+ *         passed to the S3ResponseCompleteCallback for this request.
+ *         Typically, this will return either S3StatusOK or
+ *         S3StatusAbortedByCallback.
+ **/
+typedef S3Status (S3ListMultipartUploadsResponseCallback)
+    (int isTruncated, const char *nextKeyMarker,
+     const char *nextUploadIdMarker, int uploadsCount,
+     const S3ListMultipartUpload *uploads, int commonPrefixesCount,
+     const char **commonPrefixes, void *callbackData);
+
+
+/**
+ * This callback is made with the result of a successful "list parts" request
+ * to list the parts of an in-progress multipart upload.
+ *
+ * @param isTruncated is true if the list parts response was truncated by the
+ *        S3 service, in which case the remainder of the list may be obtained
+ *        by querying again using nextPartNumberMarker
+ * @param nextPartNumberMarker if present, gives the part number to use as
+ *        the marker in a subsequent list parts request to continue listing
+ * @param initiatorId is the ID of the user who initiated the multipart
+ *        upload
+ * @param initiatorDisplayName is the display name of the initiator
+ * @param ownerId is the ID of the owner of the object being uploaded
+ * @param ownerDisplayName is the display name of the owner
+ * @param storageClass is the storage class of the object being uploaded
+ * @param partsCount is the number of elements in the [parts] array
+ * @param lastPartNumber is presumably the highest part number reported in
+ *        this response -- confirm against the implementation
+ * @param parts is an array of S3ListPart structures, one per uploaded part
+ * @param callbackData is the callback data as specified when the request
+ *        was issued.
+ * @return S3StatusOK to continue processing the request, anything else to
+ *         immediately abort the request with a status which will be
+ *         passed to the S3ResponseCompleteCallback for this request.
+ *         Typically, this will return either S3StatusOK or
+ *         S3StatusAbortedByCallback.
+ **/
+typedef S3Status (S3ListPartsResponseCallback)
+    (int isTruncated, const char *nextPartNumberMarker,
+     const char *initiatorId, const char *initiatorDisplayName,
+     const char *ownerId, const char *ownerDisplayName,
+     const char *storageClass, int partsCount, int lastPartNumber,
+     const S3ListPart *parts, void *callbackData);
+
+
+/**
+ * This callback is made after commit of a multipart upload operation.  It
+ * indicates that the data uploaded via the multipart upload operation has
+ * been committed.
+ *
+ * @param location is the URI of the completed object, as reported by S3 in
+ *        the Location element of the CompleteMultipartUpload response
+ * @param etag is the S3 etag of the complete object after the multipart
+ *        upload
+ * @param callbackData is the callback data as specified when the request
+ *        was issued.
+ * @return S3StatusOK to continue processing the request, anything else to
+ *         immediately abort the request with a status which will be
+ *         passed to the S3ResponseCompleteCallback for this request.
+ *         Typically, this will return either S3StatusOK or
+ *         S3StatusAbortedByCallback.
+ **/
+typedef S3Status (S3MultipartCommitResponseCallback)(const char *location,
+                                                     const char *etag,
+                                                     void *callbackData);
+
+
+/** **************************************************************************
+ * Callback Structures
+ ************************************************************************** **/
+
+
+/**
+ * An S3ResponseHandler defines the callbacks which are made for any
+ * request.
+ **/
+typedef struct S3ResponseHandler
+{
+    /**
+     * The propertiesCallback is made when the response properties have
+     * successfully been returned from S3.  This function may not be called
+     * if the response properties were not successfully returned from S3.
+     **/
+    S3ResponsePropertiesCallback *propertiesCallback;
+
+    /**
+     * The completeCallback is always called for every request made to S3,
+     * regardless of the outcome of the request.  It provides the status of
+     * the request upon its completion, as well as extra error details in the
+     * event of an S3 error.
+     **/
+    S3ResponseCompleteCallback *completeCallback;
+} S3ResponseHandler;
+
+
+/**
+ * An S3ListServiceHandler defines the callbacks which are made for
+ * list_service requests.
+ **/
+typedef struct S3ListServiceHandler
+{
+    /**
+     * responseHandler provides the properties and complete callback
+     **/
+    S3ResponseHandler responseHandler;
+
+    /**
+     * The listServiceCallback is called as items are reported back from S3 as
+     * responses to the request
+     **/
+    S3ListServiceCallback *listServiceCallback;
+} S3ListServiceHandler;
+
+
+/**
+ * An S3ListBucketHandler defines the callbacks which are made for
+ * list_bucket requests.
+ **/
+typedef struct S3ListBucketHandler
+{
+    /**
+     * responseHandler provides the properties and complete callback
+     **/
+    S3ResponseHandler responseHandler;
+
+    /**
+     * The listBucketCallback is called as items are reported back from S3 as
+     * responses to the request.  This may be called more than one time per
+     * list bucket request, each time providing more items from the list
+     * operation.
+     **/
+    S3ListBucketCallback *listBucketCallback;
+} S3ListBucketHandler;
+
+
+/**
+ * An S3PutObjectHandler defines the callbacks which are made for
+ * put_object requests.
+ **/
+typedef struct S3PutObjectHandler
+{
+    /**
+     * responseHandler provides the properties and complete callback
+     **/
+    S3ResponseHandler responseHandler;
+
+    /**
+     * The putObjectDataCallback is called to acquire data to send to S3 as
+     * the contents of the put_object request.  It is made repeatedly until it
+     * returns a negative number (indicating that the request should be
+     * aborted), or 0 (indicating that all data has been supplied).
+     **/
+    S3PutObjectDataCallback *putObjectDataCallback;
+} S3PutObjectHandler;
+
+
+/**
+ * An S3GetObjectHandler defines the callbacks which are made for
+ * get_object requests.
+ **/
+typedef struct S3GetObjectHandler
+{
+    /**
+     * responseHandler provides the properties and complete callback
+     **/
+    S3ResponseHandler responseHandler;
+
+    /**
+     * The getObjectDataCallback is called as data is read from S3 as the
+     * contents of the object being read in the get_object request.  It is
+     * called repeatedly until there is no more data provided in the request,
+     * or until the callback returns an error status indicating that the
+     * request should be aborted.
+     **/
+    S3GetObjectDataCallback *getObjectDataCallback;
+} S3GetObjectHandler;
+
+
+typedef struct S3MultipartInitialHandler {
+    /**
+     * responseHandler provides the properties and complete callback
+     **/
+    S3ResponseHandler responseHandler;
+
+    S3MultipartInitialResponseCallback *responseXmlCallback;
+} S3MultipartInitialHandler;
+
+typedef struct S3MultipartCommitHandler
+{
+    /**
+     * responseHandler provides the properties and complete callback
+     **/
+    S3ResponseHandler responseHandler;
+
+    /**
+     * The putObjectDataCallback is called to acquire the data to send to S3
+     * as the body of the "complete multipart upload" request (presumably the
+     * XML document listing the uploaded parts -- confirm against the
+     * implementation of the commit operation).  It is made repeatedly until
+     * it returns a negative number (indicating that the request should be
+     * aborted), or 0 (indicating that all data has been supplied).
+     **/
+    S3PutObjectDataCallback *putObjectDataCallback;
+    /**
+     * Called with the parsed commit response (the location and etag of the
+     * completed object); see S3MultipartCommitResponseCallback.
+     **/
+    S3MultipartCommitResponseCallback *responseXmlCallback;
+} S3MultipartCommitHandler;
+
+typedef struct S3ListMultipartUploadsHandler
+{
+    /**
+     * responseHandler provides the properties and complete callback
+     **/
+    S3ResponseHandler responseHandler;
+
+    S3ListMultipartUploadsResponseCallback *responseXmlCallback;
+} S3ListMultipartUploadsHandler;
+
+typedef struct S3ListPartsHandler
+{
+    /**
+     * responseHandler provides the properties and complete callback
+     **/
+    S3ResponseHandler responseHandler;
+
+    S3ListPartsResponseCallback *responseXmlCallback;
+} S3ListPartsHandler;
+
+typedef struct S3AbortMultipartUploadHandler
+{
+    /**
+     * responseHandler provides the properties and complete callback
+     **/
+    S3ResponseHandler responseHandler;
+
+} S3AbortMultipartUploadHandler;
+
+/** **************************************************************************
+ * General Library Functions
+ ************************************************************************** **/
+
+/**
+ * Initializes libs3 for use.  This function must be called before any other
+ * libs3 function is called.  It may be called multiple times, with the same
+ * effect as calling it once, as long as S3_deinitialize() is called an
+ * equal number of times when the program has finished.  This function is NOT
+ * thread-safe and must only be called by one thread at a time.
+ *
+ * @param userAgentInfo is a string that will be included in the User-Agent
+ *        header of every request made to the S3 service.  You may provide
+ *        NULL or the empty string if you don't care about this.  The value
+ *        will not be copied by this function and must remain unaltered by the
+ *        caller until S3_deinitialize() is called.
+ * @param flags is a bitmask of some combination of S3_INIT_XXX flag, or
+ *        S3_INIT_ALL, indicating which of the libraries that libs3 depends
+ *        upon should be initialized by S3_initialize().  Only if your program
+ *        initializes one of these dependency libraries itself should anything
+ *        other than S3_INIT_ALL be passed in for this bitmask.
+ *
+ *        You should pass S3_INIT_WINSOCK if and only if your application does
+ *        not initialize winsock elsewhere.  On non-Microsoft Windows
+ *        platforms it has no effect.
+ *
+ *        As a convenience, the macro S3_INIT_ALL is provided, which will do
+ *        all necessary initialization; however, be warned that things may
+ *        break if your application re-initializes the dependent libraries
+ *        later.
+ * @param defaultS3HostName is a string that specifies the default S3 server
+ *        hostname to use when making S3 requests; this value is used
+ *        whenever the hostName of an S3BucketContext is NULL.  If NULL is
+ *        passed here then the default of S3_DEFAULT_HOSTNAME will be used.
+ * @return One of:
+ *         S3StatusOK on success
+ *         S3StatusUriTooLong if the defaultS3HostName is longer than
+ *             S3_MAX_HOSTNAME_SIZE
+ *         S3StatusInternalError if dependent libraries could not be
+ *             initialized
+ *         S3StatusOutOfMemory on failure due to out of memory
+ **/
+S3Status S3_initialize(const char *userAgentInfo, int flags,
+                       const char *defaultS3HostName);
+
+
+/**
+ * Must be called once per program for each call to S3_initialize().  After
+ * this call is complete, no libs3 function may be called except
+ * S3_initialize().
+ **/
+void S3_deinitialize();
+
+
+/**
+ * Returns a string with the textual name of an S3Status code
+ *
+ * @param status is S3Status code for which the textual name will be returned
+ * @return a string with the textual name of an S3Status code
+ **/
+const char *S3_get_status_name(S3Status status);
+
+
+/**
+ * This function may be used to validate an S3 bucket name as being in the
+ * correct form for use with the S3 service.  Amazon S3 limits the allowed
+ * characters in S3 bucket names, as well as imposing some additional rules on
+ * the length of bucket names and their structure.  There are actually two
+ * limits; one for bucket names used only in path-style URIs, and a more
+ * strict limit used for bucket names used in virtual-host-style URIs.  It is
+ * advisable to use only bucket names which meet the more strict requirements
+ * regardless of how the bucket is expected to be used.
+ *
+ * This method does NOT validate that the bucket is available for use in the
+ * S3 service, so the return value of this function cannot be used to decide
+ * whether or not a bucket with the given name already exists in Amazon S3 or
+ * is accessible by the caller.  It merely validates that the bucket name is
+ * valid for use with S3.
+ *
+ * @param bucketName is the bucket name to validate
+ * @param uriStyle gives the URI style to validate the bucket name against.
+ *        It is advisable to always use S3UriStyleVirtualHost.
+ * @return One of:
+ *         S3StatusOK if the bucket name was validated successfully
+ *         S3StatusConnectionFailed if the socket connection to the server
+ *             failed
+ *         S3StatusServerFailedVerification if the SSL certificate of the
+ *             server could not be verified.
+ *         S3StatusInvalidBucketNameTooLong if the bucket name exceeded the
+ *             length limitation for the URI style, which is 255 bytes for
+ *             path style URIs and 63 bytes for virtual host type URIs
+ *         S3StatusInvalidBucketNameTooShort if the bucket name is less than
+ *             3 characters
+ *         S3StatusInvalidBucketNameFirstCharacter if the bucket name has an
+ *             invalid first character, which is anything other than
+ *             an alphanumeric character
+ *         S3StatusInvalidBucketNameCharacterSequence if the bucket name
+ *             includes an invalid character sequence, which for virtual host
+ *             style buckets is ".-" or "-."
+ *         S3StatusInvalidBucketNameCharacter if the bucket name includes an
+ *             invalid character, which is anything other than alphanumeric,
+ *             '-', '.', or for path style URIs only, '_'.
+ *         S3StatusInvalidBucketNameDotQuadNotation if the bucket name is in
+ *             dot-quad notation, i.e. the form of an IP address, which is
+ *             not allowed by Amazon S3.
+ **/
+S3Status S3_validate_bucket_name(const char *bucketName, S3UriStyle uriStyle);
+
+
+/**
+ * Converts an XML representation of an ACL to a libs3 structured
+ * representation.  This method is not strictly necessary for working with
+ * ACLs using libs3, but may be convenient for users of the library who read
+ * ACLs from elsewhere in XML format and need to use these ACLs with libs3.
+ *
+ * @param aclXml is the XML representation of the ACL.  This must be a
+ *        zero-terminated character string.
+ * @param ownerId will be filled in with the Owner ID specified in the XML.
+ *        At most MAX_GRANTEE_USER_ID_SIZE bytes will be stored at this
+ *        location.
+ * @param ownerDisplayName will be filled in with the Owner Display Name
+ *        specified in the XML.  At most MAX_GRANTEE_DISPLAY_NAME_SIZE bytes
+ *        will be stored at this location.
+ * @param aclGrantCountReturn returns the number of S3AclGrant structures
+ *        returned in the aclGrantsReturned array
+ * @param aclGrants must be passed in as an array of at least S3_ACL_MAXCOUNT
+ *        structures, and on return from this function, the first
+ *        aclGrantCountReturn structures will be filled in with the ACLs
+ *        represented by the input XML.
+ * @return One of:
+ *         S3StatusOK on successful conversion of the ACL
+ *         S3StatusInternalError on internal error representing a bug in the
+ *             libs3 library
+ *         S3StatusXmlParseFailure if the XML document was malformed
+ **/
+S3Status S3_convert_acl(char *aclXml, char *ownerId, char *ownerDisplayName,
+                        int *aclGrantCountReturn, S3AclGrant *aclGrants);
+
+
+/**
+ * Returns nonzero if the status indicates that the request should be
+ * immediately retried, because the status indicates an error of a nature that
+ * is likely due to transient conditions on the local system or S3, such as
+ * network failures, or internal retryable errors reported by S3.  Returns
+ * zero otherwise.
+ *
+ * @param status is the status to evaluate
+ * @return nonzero if the status indicates a retryable error, 0 otherwise
+ **/
+int S3_status_is_retryable(S3Status status);
+
+
+/** **************************************************************************
+ * Request Context Management Functions
+ ************************************************************************** **/
+
+/**
+ * An S3RequestContext allows multiple requests to be serviced by the same
+ * thread simultaneously.  It is an optional parameter to all libs3 request
+ * functions, and if provided, the request is managed by the S3RequestContext;
+ * if not, the request is handled synchronously and is complete when the libs3
+ * request function has returned.
+ *
+ * @param requestContextReturn returns the newly-created S3RequestContext
+ *        structure, which if successfully returned, must be destroyed via a
+ *        call to S3_destroy_request_context when it is no longer needed.  If
+ *        an error status is returned from this function, then
+ *        requestContextReturn will not have been filled in, and
+ *        S3_destroy_request_context should not be called on it
+ * @return One of:
+ *         S3StatusOK if the request context was successfully created
+ *         S3StatusOutOfMemory if the request context could not be created due
+ *             to an out of memory error
+ **/
+S3Status S3_create_request_context(S3RequestContext **requestContextReturn);
+
+
+/**
+ * Destroys an S3RequestContext which was created with
+ * S3_create_request_context.  Any requests which are currently being
+ * processed by the S3RequestContext will immediately be aborted and their
+ * request completed callbacks made with the status S3StatusInterrupted.
+ *
+ * @param requestContext is the S3RequestContext to destroy
+ **/
+void S3_destroy_request_context(S3RequestContext *requestContext);
+
+
+/**
+ * Runs the S3RequestContext until all requests within it have completed,
+ * or until an error occurs.
+ *
+ * @param requestContext is the S3RequestContext to run until all requests
+ *            within it have completed or until an error occurs
+ * @return One of:
+ *         S3StatusOK if all requests were successfully run to completion
+ *         S3StatusConnectionFailed if the socket connection to the server
+ *             failed
+ *         S3StatusServerFailedVerification if the SSL certificate of the
+ *             server could not be verified.
+ *         S3StatusInternalError if an internal error prevented the
+ *             S3RequestContext from running one or more requests
+ *         S3StatusOutOfMemory if requests could not be run to completion
+ *             due to an out of memory error
+ **/
+S3Status S3_runall_request_context(S3RequestContext *requestContext);
+
+
+/**
+ * Does some processing of requests within the S3RequestContext.  One or more
+ * requests may have callbacks made on them and may complete.  This function
+ * processes any requests which have immediately available I/O, and will not
+ * block waiting for I/O on any request.  This function would normally be used
+ * with S3_get_request_context_fdsets.
+ *
+ * @param requestContext is the S3RequestContext to process
+ * @param requestsRemainingReturn returns the number of requests remaining
+ *            and not yet completed within the S3RequestContext after this
+ *            function returns.
+ * @return One of:
+ *         S3StatusOK if request processing proceeded without error
+ *         S3StatusConnectionFailed if the socket connection to the server
+ *             failed
+ *         S3StatusServerFailedVerification if the SSL certificate of the
+ *             server could not be verified.
+ *         S3StatusInternalError if an internal error prevented the
+ *             S3RequestContext from running one or more requests
+ *         S3StatusOutOfMemory if requests could not be processed due to
+ *             an out of memory error
+ **/
+S3Status S3_runonce_request_context(S3RequestContext *requestContext,
+                                    int *requestsRemainingReturn);
+
+
+/**
+ * This function, in conjunction with S3_runonce_request_context, allows
+ * callers to manually manage a set of requests using an S3RequestContext.
+ * This function returns the set of file
+ * descriptors which the caller can watch (typically using select()), along
+ * with any other file descriptors of interest to the caller, and using
+ * whatever timeout (if any) the caller wishes, until one or more file
+ * descriptors in the returned sets become ready for I/O, at which point
+ * S3_runonce_request_context can be called to process requests with available
+ * I/O.
+ *
+ * @param requestContext is the S3RequestContext to get fd_sets from
+ * @param readFdSet is a pointer to an fd_set which will have all file
+ *        descriptors to watch for read events for the requests in the
+ *        S3RequestContext set into it upon return.  Should be zero'd out
+ *        (using FD_ZERO) before being passed into this function.
+ * @param writeFdSet is a pointer to an fd_set which will have all file
+ *        descriptors to watch for write events for the requests in the
+ *        S3RequestContext set into it upon return.  Should be zero'd out
+ *        (using FD_ZERO) before being passed into this function.
+ * @param exceptFdSet is a pointer to an fd_set which will have all file
+ *        descriptors to watch for exception events for the requests in the
+ *        S3RequestContext set into it upon return.  Should be zero'd out
+ *        (using FD_ZERO) before being passed into this function.
+ * @param maxFd returns the highest file descriptor set into any of the
+ *        fd_sets, or -1 if no file descriptors were set
+ * @return One of:
+ *         S3StatusOK if all fd_sets were successfully set
+ *         S3StatusInternalError if an internal error prevented this function
+ *             from completing successfully
+ **/
+S3Status S3_get_request_context_fdsets(S3RequestContext *requestContext,
+                                       fd_set *readFdSet, fd_set *writeFdSet,
+                                       fd_set *exceptFdSet, int *maxFd);
+
+
+/**
+ * This function returns the maximum number of milliseconds that the caller of
+ * S3_runonce_request_context should wait on the fdsets obtained via a call to
+ * S3_get_request_context_fdsets.  In other words, this is essentially the
+ * select() timeout that needs to be used (shorter values are OK, but no
+ * longer than this) to ensure that internal timeout code of libs3 can work
+ * properly.  This function should be called right before select() each time
+ * select() on the request_context fdsets are to be performed by the libs3
+ * user.
+ *
+ * @param requestContext is the S3RequestContext to get the timeout from
+ * @return the maximum number of milliseconds to select() on fdsets.  Callers
+ *         could wait a shorter time if they wish, but not longer.
+ **/
+int64_t S3_get_request_context_timeout(S3RequestContext *requestContext);
+
+/**
+ * This function enables SSL peer certificate verification on a per-request
+ * context basis. If this is called, the context's value of verifyPeer will
+ * be used when processing requests. Otherwise, the default set by the
+ * flags to S3_initialize() are used.
+ *
+ * @param requestContext the S3RequestContext to set the verifyPeer flag on.
+ * @param verifyPeer a boolean value indicating whether to verify the peer
+ *        certificate or not.
+ */
+void S3_set_request_context_verify_peer(S3RequestContext *requestContext,
+                                        int verifyPeer);
+
+
+/** **************************************************************************
+ * S3 Utility Functions
+ ************************************************************************** **/
+
+/**
+ * Generates an HTTP authenticated query string, which may then be used by
+ * a browser (or other web client) to issue the request.  The request is
+ * implicitly a GET request; Amazon S3 is documented to only support this type
+ * of authenticated query string request.
+ *
+ * @param buffer is the output buffer for the authenticated query string.
+ *        It must be at least S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes in
+ *        length.
+ * @param bucketContext gives the bucket and associated parameters for the
+ *        request to generate.
+ * @param key gives the key which the authenticated request will GET.
+ * @param expires gives the number of seconds since Unix epoch for the
+ *        expiration date of the request; after this time, the request will
+ *        no longer be valid.  If this value is negative, the largest
+ *        expiration interval possible is used (one week).
+ * @param resource gives a sub-resource to be fetched for the request, or NULL
+ *        for none.  This should be of the form "?<resource>", i.e.
+ *        "?torrent".
+ * @param httpMethod the HTTP request method that will be used with the
+ *        generated query string (e.g. "GET").
+ * @return One of:
+ *         S3StatusUriTooLong if, due to an internal error, the generated URI
+ *             is longer than S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes in
+ *             length and thus will not fit into the supplied buffer
+ *         S3StatusOK on success
+ **/
+S3Status S3_generate_authenticated_query_string
+    (char *buffer, const S3BucketContext *bucketContext,
+     const char *key, int expires, const char *resource,
+     const char *httpMethod);
+
+
+/** **************************************************************************
+ * Service Functions
+ ************************************************************************** **/
+
+/**
+ * Lists all S3 buckets belonging to the access key id.
+ *
+ * @param protocol gives the protocol to use for this request
+ * @param accessKeyId gives the Amazon Access Key ID for which to list owned
+ *        buckets
+ * @param secretAccessKey gives the Amazon Secret Access Key for which to list
+ *        owned buckets
+ * @param securityToken gives the security token used to generate the Temporary
+ *        Security Credentials
+ * @param hostName is the S3 host name to use; if NULL is passed in, the
+ *        default S3 host as provided to S3_initialize() will be used.
+ * @param authRegion is the AWS region to use for the authorization signature
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_list_service(S3Protocol protocol, const char *accessKeyId,
+                     const char *secretAccessKey, const char *securityToken,
+                     const char *hostName, const char *authRegion,
+                     S3RequestContext *requestContext,
+                     int timeoutMs,
+                     const S3ListServiceHandler *handler, void *callbackData);
+
+
+/** **************************************************************************
+ * Bucket Functions
+ ************************************************************************** **/
+/**
+ * Tests the existence of an S3 bucket, additionally returning the bucket's
+ * location if it exists and is accessible.
+ *
+ * @param protocol gives the protocol to use for this request
+ * @param uriStyle gives the URI style to use for this request
+ * @param accessKeyId gives the Amazon Access Key ID for which to list owned
+ *        buckets
+ * @param secretAccessKey gives the Amazon Secret Access Key for which to list
+ *        owned buckets
+ * @param securityToken gives the security token used to generate the Temporary
+ *        Security Credentials
+ * @param hostName is the S3 host name to use; if NULL is passed in, the
+ *        default S3 host as provided to S3_initialize() will be used.
+ * @param bucketName is the bucket name to test
+ * @param authRegion is the AWS region to use for the authorization signature
+ * @param locationConstraintReturnSize gives the number of bytes in the
+ *        locationConstraintReturn parameter
+ * @param locationConstraintReturn provides the location into which to write
+ *        the name of the location constraint naming the geographic location
+ *        of the S3 bucket.  This must have at least as many characters in it
+ *        as specified by locationConstraintReturnSize, and should start out
+ *        NULL-terminated.  On successful completion of this request, this
+ *        will be set to the name of the geographic location of S3 bucket, or
+ *        will be left as a zero-length string if no location was available.
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_test_bucket(S3Protocol protocol, S3UriStyle uriStyle,
+                    const char *accessKeyId, const char *secretAccessKey,
+                    const char *securityToken, const char *hostName,
+                    const char *bucketName, const char *authRegion,
+                    int locationConstraintReturnSize,
+                    char *locationConstraintReturn,
+                    S3RequestContext *requestContext,
+                    int timeoutMs,
+                    const S3ResponseHandler *handler, void *callbackData);
+
+
+/**
+ * Creates a new bucket.
+ *
+ * @param protocol gives the protocol to use for this request
+ * @param accessKeyId gives the Amazon Access Key ID for which to list owned
+ *        buckets
+ * @param secretAccessKey gives the Amazon Secret Access Key for which to list
+ *        owned buckets
+ * @param securityToken gives the security token used to generate the Temporary
+ *        Security Credentials
+ * @param hostName is the S3 host name to use; if NULL is passed in, the
+ *        default S3 host as provided to S3_initialize() will be used.
+ * @param bucketName is the name of the bucket to be created
+ * @param authRegion is the AWS region to use for the authorization signature
+ * @param cannedAcl gives the "REST canned ACL" to use for the created bucket
+ * @param locationConstraint if non-NULL, gives the geographic location for
+ *        the bucket to create.
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
+                      const char *secretAccessKey, const char *securityToken,
+                      const char *hostName, const char *bucketName,
+                      const char *authRegion, S3CannedAcl cannedAcl,
+                      const char *locationConstraint,
+                      S3RequestContext *requestContext,
+                      int timeoutMs,
+                      const S3ResponseHandler *handler, void *callbackData);
+
+
+/**
+ * Deletes a bucket.  The bucket must be empty, or the status
+ * S3StatusErrorBucketNotEmpty will result.
+ *
+ * @param protocol gives the protocol to use for this request
+ * @param uriStyle gives the URI style to use for this request
+ * @param accessKeyId gives the Amazon Access Key ID for which to list owned
+ *        buckets
+ * @param secretAccessKey gives the Amazon Secret Access Key for which to list
+ *        owned buckets
+ * @param securityToken gives the security token used to generate the Temporary
+ *        Security Credentials
+ * @param hostName is the S3 host name to use; if NULL is passed in, the
+ *        default S3 host as provided to S3_initialize() will be used.
+ * @param bucketName is the name of the bucket to be deleted
+ * @param authRegion is the AWS region to use for the authorization signature
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_delete_bucket(S3Protocol protocol, S3UriStyle uriStyle,
+                      const char *accessKeyId, const char *secretAccessKey,
+                      const char *securityToken, const char *hostName,
+                      const char *bucketName, const char *authRegion,
+                      S3RequestContext *requestContext,
+                      int timeoutMs,
+                      const S3ResponseHandler *handler, void *callbackData);
+
+
+/**
+ * Lists keys within a bucket.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request
+ * @param prefix if present and non-empty, gives a prefix for matching keys
+ * @param marker if present and non-empty, only keys occurring after this
+ *        value will be listed
+ * @param delimiter if present and non-empty, causes keys that contain the
+ *        same string between the prefix and the first occurrence of the
+ *        delimiter to be rolled up into a single result element
+ * @param maxkeys is the maximum number of keys to return
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_list_bucket(const S3BucketContext *bucketContext,
+                    const char *prefix, const char *marker,
+                    const char *delimiter, int maxkeys,
+                    S3RequestContext *requestContext,
+                    int timeoutMs,
+                    const S3ListBucketHandler *handler, void *callbackData);
+
+
+/** **************************************************************************
+ * Object Functions
+ ************************************************************************** **/
+
+/**
+ * Puts object data to S3.  This overwrites any existing object at that key;
+ * note that S3 currently only supports full-object upload.  The data to
+ * upload will be acquired by calling the handler's putObjectDataCallback.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request
+ * @param key is the key of the object to put to
+ * @param contentLength is required and gives the total number of bytes that
+ *        will be put
+ * @param putProperties optionally provides additional properties to apply to
+ *        the object that is being put to
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_put_object(const S3BucketContext *bucketContext, const char *key,
+                   uint64_t contentLength,
+                   const S3PutProperties *putProperties,
+                   S3RequestContext *requestContext,
+                   int timeoutMs,
+                   const S3PutObjectHandler *handler, void *callbackData);
+
+
+/**
+ * Copies an object from one location to another.  The object may be copied
+ * back to itself, which is useful for replacing metadata without changing
+ * the object.
+ *
+ * @param bucketContext gives the source bucket and associated parameters for
+ *        this request
+ * @param key is the source key
+ * @param destinationBucket gives the destination bucket into which to copy
+ *        the object.  If NULL, the source bucket will be used.
+ * @param destinationKey gives the destination key into which to copy the
+ *        object.  If NULL, the source key will be used.
+ * @param putProperties optionally provides properties to apply to the object
+ *        that is being put to.  If not supplied (i.e. NULL is passed in),
+ *        then the copied object will retain the metadata of the copied
+ *        object.
+ * @param lastModifiedReturn returns the last modified date of the copied
+ *        object
+ * @param eTagReturnSize specifies the number of bytes provided in the
+ *        eTagReturn buffer
+ * @param eTagReturn is a buffer into which the resulting eTag of the copied
+ *        object will be written
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_copy_object(const S3BucketContext *bucketContext,
+                    const char *key, const char *destinationBucket,
+                    const char *destinationKey,
+                    const S3PutProperties *putProperties,
+                    int64_t *lastModifiedReturn, int eTagReturnSize,
+                    char *eTagReturn, S3RequestContext *requestContext,
+                    int timeoutMs,
+                    const S3ResponseHandler *handler, void *callbackData);
+
+
+/**
+ * Copies portion of an object from one location to another.  The object may
+ * be copied back to itself, which is useful for replacing metadata without
+ * changing the object.  Required when doing >5GB object copies.
+ *
+ * @param bucketContext gives the source bucket and associated parameters for
+ *        this request
+ * @param key is the source key
+ * @param destinationBucket gives the destination bucket into which to copy
+ *        the object.  If NULL, the source bucket will be used.
+ * @param destinationKey gives the destination key into which to copy the
+ *        object.  If NULL, the source key will be used.
+ * @param partNo is the sequence number of any multipart upload, 0 = non-multipart
+ * @param uploadId is the ID returned for a multipart initialize request, ignored
+ *        if partNo = 0
+ * @param startOffset is the starting point in original object to copy.
+ * @param count is the number of bytes starting at startOffset in original
+ *        object to copy.  0 indicates no-range (i.e. all)
+ * @param putProperties optionally provides properties to apply to the object
+ *        that is being put to.  If not supplied (i.e. NULL is passed in),
+ *        then the copied object will retain the metadata of the copied
+ *        object.
+ * @param lastModifiedReturn returns the last modified date of the copied
+ *        object
+ * @param eTagReturnSize specifies the number of bytes provided in the
+ *        eTagReturn buffer
+ * @param eTagReturn is a buffer into which the resulting eTag of the copied
+ *        object will be written
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_copy_object_range(const S3BucketContext *bucketContext,
+                          const char *key, const char *destinationBucket,
+                          const char *destinationKey,
+                          const int partNo, const char *uploadId,
+                          const unsigned long startOffset, const unsigned long count,
+                          const S3PutProperties *putProperties,
+                          int64_t *lastModifiedReturn, int eTagReturnSize,
+                          char *eTagReturn, S3RequestContext *requestContext,
+                          int timeoutMs,
+                          const S3ResponseHandler *handler, void *callbackData);
+
+
+/**
+ * Gets an object from S3.  The contents of the object are returned in the
+ * handler's getObjectDataCallback.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request
+ * @param key is the key of the object to get
+ * @param getConditions if non-NULL, gives a set of conditions which must be
+ *        met in order for the request to succeed
+ * @param startByte gives the start byte for the byte range of the contents
+ *        to be returned
+ * @param byteCount gives the number of bytes to return; a value of 0
+ *        indicates that the contents up to the end should be returned
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_get_object(const S3BucketContext *bucketContext, const char *key,
+                   const S3GetConditions *getConditions,
+                   uint64_t startByte, uint64_t byteCount,
+                   S3RequestContext *requestContext,
+                   int timeoutMs,
+                   const S3GetObjectHandler *handler, void *callbackData);
+
+
+/**
+ * Gets the response properties for the object, but not the object contents.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request
+ * @param key is the key of the object to get the properties of
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_head_object(const S3BucketContext *bucketContext, const char *key,
+                    S3RequestContext *requestContext,
+                    int timeoutMs,
+                    const S3ResponseHandler *handler, void *callbackData);
+
+/**
+ * Deletes an object from S3.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request
+ * @param key is the key of the object to delete
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_delete_object(const S3BucketContext *bucketContext, const char *key,
+                      S3RequestContext *requestContext,
+                      int timeoutMs,
+                      const S3ResponseHandler *handler, void *callbackData);
+
+
+/** **************************************************************************
+ * Access Control List Functions
+ ************************************************************************** **/
+
+/**
+ * Gets the ACL for the given bucket or object.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request
+ * @param key is the key of the object to get the ACL of; or NULL to get the
+ *        ACL of the bucket
+ * @param ownerId must be supplied as a buffer of at least
+ *        S3_MAX_GRANTEE_USER_ID_SIZE bytes, and will be filled in with the
+ *        owner ID of the object/bucket
+ * @param ownerDisplayName must be supplied as a buffer of at least
+ *        S3_MAX_GRANTEE_DISPLAY_NAME_SIZE bytes, and will be filled in with
+ *        the display name of the object/bucket
+ * @param aclGrantCountReturn returns the number of S3AclGrant structures
+ *        returned in the aclGrants parameter
+ * @param aclGrants must be passed in as an array of at least
+ *        S3_MAX_ACL_GRANT_COUNT S3AclGrant structures, which will be filled
+ *        in with the grant information for the ACL
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_get_acl(const S3BucketContext *bucketContext, const char *key,
+                char *ownerId, char *ownerDisplayName,
+                int *aclGrantCountReturn, S3AclGrant *aclGrants,
+                S3RequestContext *requestContext,
+                int timeoutMs,
+                const S3ResponseHandler *handler, void *callbackData);
+
+
+/**
+ * Sets the ACL for the given bucket or object.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request
+ * @param key is the key of the object to set the ACL for; or NULL to set the
+ *        ACL for the bucket
+ * @param ownerId is the owner ID of the object/bucket.  Unfortunately, S3
+ *        requires this to be valid and thus it must have been fetched by a
+ *        previous S3 request, such as a list_buckets request.
+ * @param ownerDisplayName is the owner display name of the object/bucket.
+ *        Unfortunately, S3 requires this to be valid and thus it must have
+ *        been fetched by a previous S3 request, such as a list_buckets
+ *        request.
+ * @param aclGrantCount is the number of ACL grants to set for the
+ *        object/bucket
+ * @param aclGrants are the ACL grants to set for the object/bucket
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_set_acl(const S3BucketContext *bucketContext, const char *key,
+                const char *ownerId, const char *ownerDisplayName,
+                int aclGrantCount, const S3AclGrant *aclGrants,
+                S3RequestContext *requestContext,
+                int timeoutMs,
+                const S3ResponseHandler *handler, void *callbackData);
+
+
+/** **************************************************************************
+ * Lifecycle Control Functions
+ ************************************************************************** **/
+
+/**
+ * Gets the lifecycle for the given bucket
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request
+ * @param lifecycleXmlDocumentReturn buffer for lifecycle XML document
+ * @param lifecycleXmlDocumentBufferSize size of the buffer
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_get_lifecycle(const S3BucketContext *bucketContext,
+                      char *lifecycleXmlDocumentReturn, int lifecycleXmlDocumentBufferSize,
+                      S3RequestContext *requestContext,
+                      int timeoutMs,
+                      const S3ResponseHandler *handler, void *callbackData);
+
+
+/**
+ * Sets the lifecycle for the given bucket
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request
+ * @param lifecycleXmlDocument Lifecycle configuration as an XML document
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_set_lifecycle(const S3BucketContext *bucketContext,
+                      const char *lifecycleXmlDocument,
+                      S3RequestContext *requestContext,
+                      int timeoutMs,
+                      const S3ResponseHandler *handler, void *callbackData);
+
+/** **************************************************************************
+ * Server Access Log Functions
+ ************************************************************************** **/
+
+/**
+ * Gets the service access logging settings for a bucket.  The service access
+ * logging settings specify whether or not the S3 service will write service
+ * access logs for requests made for the given bucket, and if so, several
+ * settings controlling how these logs will be written.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request; this is the bucket for which service access logging is
+ *        being requested
+ * @param targetBucketReturn must be passed in as a buffer of at least
+ *        (S3_MAX_BUCKET_NAME_SIZE + 1) bytes in length, and will be filled
+ *        in with the target bucket name for access logging for the given
+ *        bucket, which is the bucket into which access logs for the specified
+ *        bucket will be written.  This is returned as an empty string if
+ *        service access logging is not enabled for the given bucket.
+ * @param targetPrefixReturn must be passed in as a buffer of at least
+ *        (S3_MAX_KEY_SIZE + 1) bytes in length, and will be filled in
+ *        with the key prefix for server access logs for the given bucket,
+ *        or the empty string if no such prefix is specified.
+ * @param aclGrantCountReturn returns the number of ACL grants that are
+ *        associated with the server access logging for the given bucket.
+ * @param aclGrants must be passed in as an array of at least
+ *        S3_MAX_ACL_GRANT_COUNT S3AclGrant structures, and these will be
+ *        filled in with the target grants associated with the server access
+ *        logging for the given bucket, whose number is returned in the
+ *        aclGrantCountReturn parameter.  These grants will be applied to the
+ *        ACL of any server access logging log files generated by the S3
+ *        service for the given bucket.
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_get_server_access_logging(const S3BucketContext *bucketContext,
+                                  char *targetBucketReturn,
+                                  char *targetPrefixReturn,
+                                  int *aclGrantCountReturn,
+                                  S3AclGrant *aclGrants,
+                                  S3RequestContext *requestContext,
+                                  int timeoutMs,
+                                  const S3ResponseHandler *handler,
+                                  void *callbackData);
+
+
+/**
+ * Sets the service access logging settings for a bucket.  The service access
+ * logging settings specify whether or not the S3 service will write service
+ * access logs for requests made for the given bucket, and if so, several
+ * settings controlling how these logs will be written.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request; this is the bucket for which service access logging is
+ *        being set
+ * @param targetBucket gives the target bucket name for access logging for the
+ *        given bucket, which is the bucket into which access logs for the
+ *        specified bucket will be written.
+ * @param targetPrefix is an optional parameter which specifies the key prefix
+ *        for server access logs for the given bucket, or NULL if no such
+ *        prefix is to be used.
+ * @param aclGrantCount specifies the number of ACL grants that are to be
+ *        associated with the server access logging for the given bucket.
+ * @param aclGrants is as an array of S3AclGrant structures, whose number is
+ *        given by the aclGrantCount parameter.  These grants will be applied
+ *        to the ACL of any server access logging log files generated by the
+ *        S3 service for the given bucket.
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_set_server_access_logging(const S3BucketContext *bucketContext,
+                                  const char *targetBucket,
+                                  const char *targetPrefix, int aclGrantCount,
+                                  const S3AclGrant *aclGrants,
+                                  S3RequestContext *requestContext,
+                                  int timeoutMs,
+                                  const S3ResponseHandler *handler,
+                                  void *callbackData);
+
+
+/**
+ * This operation initiates a multipart upload and returns an upload ID.
+ * This upload ID is used to associate all the parts in the specific
+ * multipart upload. You specify this upload ID in each of your subsequent
+ * upload part requests
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request; this is the bucket into which the multipart upload is
+ *        being initiated
+ * @param key is the source key
+ * @param putProperties optionally provides additional properties to apply to
+ *        the object that is being put to
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_initiate_multipart(S3BucketContext *bucketContext, const char *key,
+                           S3PutProperties *putProperties,
+                           S3MultipartInitialHandler *handler,
+                           S3RequestContext *requestContext,
+                           int timeoutMs,
+                           void *callbackData);
+
+
+/**
+ * This operation uploads a part in a multipart upload.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request; this is the bucket containing the multipart upload to
+ *        which the part is being uploaded
+ * @param key is the source key
+ * @param putProperties optionally provides additional properties to apply to
+ *        the object that is being put to
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param seq is a part number that uniquely identifies a part and also
+ *        defines its position within the object being created.
+ * @param upload_id is the upload ID returned by S3_initiate_multipart
+ * @param partContentLength gives the size of the part, in bytes
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_upload_part(S3BucketContext *bucketContext, const char *key,
+                    S3PutProperties * putProperties,
+                    S3PutObjectHandler *handler,
+                    int seq, const char *upload_id, int partContentLength,
+                    S3RequestContext *requestContext,
+                    int timeoutMs,
+                    void *callbackData);
+
+
+/**
+ * This operation completes a multipart upload by assembling previously
+ * uploaded parts.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request; this is the bucket containing the multipart upload that
+ *        is being completed
+ * @param key is the source key
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param upload_id is the upload ID returned by S3_initiate_multipart
+ * @param contentLength gives the total size of the commit message, in bytes
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_complete_multipart_upload(S3BucketContext *bucketContext,
+                                  const char *key,
+                                  S3MultipartCommitHandler *handler,
+                                  const char *upload_id,
+                                  int contentLength,
+                                  S3RequestContext *requestContext,
+                                  int timeoutMs,
+                                  void *callbackData);
+
+
+/**
+ * This operation lists the parts that have been uploaded for a specific
+ * multipart upload.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request; this is the bucket containing the multipart upload whose
+ *        parts are being listed
+ * @param key is the source key
+ * @param partnumbermarker if present and non-empty, specifies the part after
+ *        which listing should begin.  Only parts with higher part numbers
+ *        will be listed.
+ * @param uploadid identifying the multipart upload whose parts are being
+ *        listed.
+ * @param encodingtype if present and non-empty, requests Amazon S3 to encode
+ *        the response and specifies the encoding method to use.
+ * @param maxparts Sets the maximum number of parts to return in the response
+ *        body. Default: 1,000
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_list_parts(S3BucketContext *bucketContext, const char *key,
+                   const char *partnumbermarker,
+                   const char *uploadid, const char *encodingtype,
+                   int maxparts, S3RequestContext *requestContext,
+                   int timeoutMs,
+                   const S3ListPartsHandler *handler, void *callbackData);
+
+
+/**
+ * This operation aborts a multipart upload. After a multipart upload is
+ * aborted, no additional parts can be uploaded using that upload ID.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request; this is the bucket containing the multipart upload that
+ *        is being aborted
+ * @param key is the source key
+ * @param uploadId identifies the multipart upload that is to be
+ *        aborted.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ **/
+void S3_abort_multipart_upload(S3BucketContext *bucketContext, const char *key,
+                               const char *uploadId,
+                               int timeoutMs,
+                               S3AbortMultipartUploadHandler *handler);
+
+
+/**
+ * This operation lists in-progress multipart uploads. An in-progress
+ * multipart upload is a multipart upload that has been initiated,
+ * using the Initiate Multipart Upload request, but has not yet been
+ * completed or aborted.
+ *
+ * @param bucketContext gives the bucket and associated parameters for this
+ *        request; this is the bucket whose in-progress multipart uploads
+ *        are being listed
+ * @param prefix if present and non-empty, lists in-progress uploads only for
+ *        those keys that begin with the specified prefix.
+ * @param keymarker if present and non-empty, together with upload-id-marker,
+ *        this parameter specifies the multipart upload after which listing
+ *        should begin.
+ * @param uploadidmarker if present and non-empty, together with key-marker,
+ *        specifies the multipart upload after which listing should begin.
+ * @param encodingtype if present and non-empty, requests Amazon S3 to encode
+ *        the response and specifies the encoding method to use.
+ * @param delimiter if present and non-empty, is the character you use to
+ *        group keys.
+ * @param maxuploads sets the maximum number of multipart uploads,
+ *        from 1 to 1,000, to return in the response body.
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param timeoutMs if not 0 contains total request timeout in milliseconds
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_list_multipart_uploads(S3BucketContext *bucketContext,
+                               const char *prefix, const char *keymarker,
+                               const char *uploadidmarker,
+                               const char *encodingtype, const char *delimiter,
+                               int maxuploads, S3RequestContext *requestContext,
+                               int timeoutMs,
+                               const S3ListMultipartUploadsHandler *handler,
+                               void *callbackData);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LIBS3_H */

+ 45 - 0
libs/libs3/inc/mingw/pthread.h

@@ -0,0 +1,45 @@
+/** **************************************************************************
+ * pthread.h
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#ifndef PTHREAD_H
+#define PTHREAD_H
+
+// This is a minimal implementation of pthreads on Windows, implementing just
+// the APIs needed by libs3
+
+unsigned long pthread_self();
+
+typedef struct
+{
+    CRITICAL_SECTION criticalSection;
+} pthread_mutex_t;
+
+int pthread_mutex_init(pthread_mutex_t *mutex, void *);
+int pthread_mutex_lock(pthread_mutex_t *mutex);
+int pthread_mutex_unlock(pthread_mutex_t *mutex);
+int pthread_mutex_destroy(pthread_mutex_t *mutex);
+
+#endif /* PTHREAD_H */

+ 30 - 0
libs/libs3/inc/mingw/sys/select.h

@@ -0,0 +1,30 @@
+/** **************************************************************************
+ * select.h
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+// This file is used only on a MingW build, and converts an include of
+// sys/select.h to its Windows equivalent
+
+#include <winsock2.h>

+ 41 - 0
libs/libs3/inc/mingw/sys/utsname.h

@@ -0,0 +1,41 @@
+/** **************************************************************************
+ * utsname.h
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+// This file is used only on a MingW build, and provides an implementation
+// of POSIX sys/utsname.h
+
+#ifndef UTSNAME_H
+#define UTSNAME_H
+
+struct utsname
+{
+    const char *sysname;
+    const char *machine;
+};
+
+int uname(struct utsname *);
+
+#endif /* UTSNAME_H */

+ 191 - 0
libs/libs3/inc/request.h

@@ -0,0 +1,191 @@
+/** **************************************************************************
+ * request.h
+ *
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ *
+ * This file is part of libs3.
+ *
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#ifndef REQUEST_H
+#define REQUEST_H
+
+#include "libs3.h"
+#include "error_parser.h"
+#include "response_headers_handler.h"
+#include "util.h"
+
+// Describes a type of HTTP request (these are our supported HTTP "verbs")
+typedef enum
+{
+    HttpRequestTypeGET,
+    HttpRequestTypeHEAD,
+    HttpRequestTypePUT,
+    HttpRequestTypeCOPY,
+    HttpRequestTypeDELETE,
+    HttpRequestTypePOST,
+    HttpRequestTypeInvalid
+} HttpRequestType;
+
+
+// This completely describes a request.  A RequestParams is not required to be
+// allocated from the heap and its lifetime is not assumed to extend beyond
+// the lifetime of the function to which it has been passed.
+typedef struct RequestParams
+{
+    // Request type, affects the HTTP verb used
+    HttpRequestType httpRequestType;
+
+    // Bucket context for request
+    S3BucketContext bucketContext;
+
+    // Key, if any
+    const char *key;
+
+    // Query params - ready to append to URI (i.e. ?p1=v1&amp;p2=v2)
+    const char *queryParams;
+
+    // sub resource, like ?acl, ?location, ?torrent, ?logging
+    const char *subResource;
+
+    // If this is a copy operation, this gives the source bucket
+    const char *copySourceBucketName;
+
+    // If this is a copy operation, this gives the source key
+    const char *copySourceKey;
+
+    // Get conditions
+    const S3GetConditions *getConditions;
+
+    // Start byte
+    size_t startByte;
+
+    // Byte count
+    size_t byteCount;
+
+    // Put properties
+    const S3PutProperties *putProperties;
+
+    // Callback to be made when headers are available.  Might not be called.
+    S3ResponsePropertiesCallback *propertiesCallback;
+
+    // Callback to be made to supply data to send to S3.  Might not be called.
+    S3PutObjectDataCallback *toS3Callback;
+
+    // Number of bytes total that readCallback will supply
+    int64_t toS3CallbackTotalSize;
+
+    // Callback to be made that supplies data read from S3.
+    // Might not be called.
+    S3GetObjectDataCallback *fromS3Callback;
+
+    // Callback to be made when request is complete.  This will *always* be
+    // called.
+    S3ResponseCompleteCallback *completeCallback;
+
+    // Data passed to the callbacks
+    void *callbackData;
+
+    // Request timeout. If 0, no timeout will be enforced
+    int timeoutMs;
+} RequestParams;
+
+
+// This is the stuff associated with a request that needs to be on the heap
+// (and thus live while a curl_multi is in use).
+typedef struct Request
+{
+    // These put the request on a doubly-linked list of requests in a
+    // request context, *if* the request is in a request context (else these
+    // will both be 0)
+    struct Request *prev, *next;
+
+    // The status of this Request, as will be reported to the user via the
+    // complete callback
+    S3Status status;
+
+    // The HTTP code returned by the S3 server, if it is known.  Would rather
+    // not have to keep track of this but S3 doesn't always indicate its
+    // errors the same way
+    int httpResponseCode;
+
+    // The HTTP headers to use for the curl request
+    struct curl_slist *headers;
+
+    // The CURL structure driving the request
+    CURL *curl;
+
+    // libcurl requires that the uri be stored outside of the curl handle
+    char uri[MAX_URI_SIZE + 1];
+
+    // Callback to be made when headers are available.  Might not be called.
+    S3ResponsePropertiesCallback *propertiesCallback;
+
+    // Callback to be made to supply data to send to S3.  Might not be called.
+    S3PutObjectDataCallback *toS3Callback;
+
+    // Number of bytes total that readCallback has left to supply
+    int64_t toS3CallbackBytesRemaining;
+
+    // Callback to be made that supplies data read from S3.
+    // Might not be called.
+    S3GetObjectDataCallback *fromS3Callback;
+
+    // Callback to be made when request is complete.  This will *always* be
+    // called.
+    S3ResponseCompleteCallback *completeCallback;
+
+    // Data passed to the callbacks
+    void *callbackData;
+
+    // Handler of response headers
+    ResponseHeadersHandler responseHeadersHandler;
+
+    // This is set to nonzero after the properties callback has been made
+    int propertiesCallbackMade;
+
+    // Parser of errors
+    ErrorParser errorParser;
+} Request;
+
+
+// Request functions
+// ----------------------------------------------------------------------------
+
+// Initialize the API
+S3Status request_api_initialize(const char *userAgentInfo, int flags,
+                                const char *hostName);
+
+// Deinitialize the API
+void request_api_deinitialize();
+
+// Perform a request; if context is 0, performs the request immediately;
+// otherwise, sets it up to be performed by context.
+void request_perform(const RequestParams *params, S3RequestContext *context);
+
+// Called by the internal request code or internal request context code when a
+// curl has finished the request
+void request_finish(Request *request);
+
+// Convert a CURLE code to an S3Status
+S3Status request_curl_code_to_status(CURLcode code);
+
+
+#endif /* REQUEST_H */

+ 43 - 0
libs/libs3/inc/request_context.h

@@ -0,0 +1,43 @@
+/** **************************************************************************
+ * request_context.h
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#ifndef REQUEST_CONTEXT_H
+#define REQUEST_CONTEXT_H
+
+#include "libs3.h"
+
+struct S3RequestContext
+{
+    CURLM *curlm;
+    
+    int verifyPeerSet;
+    long verifyPeer;
+
+    struct Request *requests;
+};
+
+
+#endif /* REQUEST_CONTEXT_H */

+ 64 - 0
libs/libs3/inc/response_headers_handler.h

@@ -0,0 +1,64 @@
+/** **************************************************************************
+ * response_headers_handler.h
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#ifndef RESPONSE_HEADERS_HANDLER_H
+#define RESPONSE_HEADERS_HANDLER_H
+
+#include "libs3.h"
+#include "string_buffer.h"
+#include "util.h"
+
+
+typedef struct ResponseHeadersHandler
+{
+    // The structure to pass to the headers callback.  This is filled in by
+    // the ResponseHeadersHandler from the headers added to it.
+    S3ResponseProperties responseProperties;
+
+    // Set to 1 after the done call has been made
+    int done;
+
+    // responseProperties strings get copied into here.  We allow 128 bytes
+    // for each header, plus \0 term.
+    string_multibuffer(responsePropertyStrings, 5 * 129);
+
+    // responseproperties.metaHeaders strings get copied into here
+    string_multibuffer(responseMetaDataStrings, 
+                       COMPACTED_METADATA_BUFFER_SIZE);
+
+    // Response meta data
+    S3NameValue responseMetaData[S3_MAX_METADATA_COUNT];
+} ResponseHeadersHandler;
+
+
+void response_headers_handler_initialize(ResponseHeadersHandler *handler);
+
+void response_headers_handler_add(ResponseHeadersHandler *handler,
+                                  char *data, int dataLen);
+
+void response_headers_handler_done(ResponseHeadersHandler *handler, 
+                                   CURL *curl);
+
+#endif /* RESPONSE_HEADERS_HANDLER_H */

+ 76 - 0
libs/libs3/inc/simplexml.h

@@ -0,0 +1,76 @@
+/** **************************************************************************
+ * simplexml.h
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#ifndef SIMPLEXML_H
+#define SIMPLEXML_H
+
+#include "libs3.h"
+
+
+// Simple XML callback.
+//
+// elementPath: is the full "path" of the element; i.e.
+// <foo><bar><baz>data</baz></bar></foo> would have 'data' in the element
+// foo/bar/baz.
+// 
+// Return of anything other than S3StatusOK causes the calling
+// simplexml_add() function to immediately stop and return the status.
+//
+// data is passed in as 0 on end of element
+typedef S3Status (SimpleXmlCallback)(const char *elementPath, const char *data,
+                                     int dataLen, void *callbackData);
+
+// State for one streaming XML parse.
+typedef struct SimpleXml
+{
+    // Underlying XML parser handle (kept opaque so this header does not
+    // depend on the parser library's headers)
+    void *xmlParser;
+
+    // Invoked with element data, and with data == 0 on end of element
+    SimpleXmlCallback *callback;
+
+    // Opaque pointer handed back to callback on every invocation
+    void *callbackData;
+
+    // Slash-separated path of the currently open elements, and its length
+    char elementPath[512];
+
+    int elementPathLen;
+
+    // Parse status; presumably latches a non-OK value returned by the
+    // callback -- confirm in simplexml.c (not visible here)
+    S3Status status;
+} SimpleXml;
+
+
+// Simple XML parsing
+// ----------------------------------------------------------------------------
+
+// Always call this, even if the simplexml doesn't end up being used
+void simplexml_initialize(SimpleXml *simpleXml, SimpleXmlCallback *callback,
+                          void *callbackData);
+
+// Feeds [dataLen] bytes of XML text into the parser; returns the first
+// non-OK status produced by the callback, else S3StatusOK
+S3Status simplexml_add(SimpleXml *simpleXml, const char *data, int dataLen);
+
+
+// Always call this
+void simplexml_deinitialize(SimpleXml *simpleXml);
+
+
+#endif /* SIMPLEXML_H */

+ 107 - 0
libs/libs3/inc/string_buffer.h

@@ -0,0 +1,107 @@
+/** **************************************************************************
+ * string_buffer.h
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#ifndef STRING_BUFFER_H
+#define STRING_BUFFER_H
+
+#include <stdio.h>
+
+
+// Declare a string_buffer with the given name of the given maximum length.
+// This expands to two declarations: the char array itself (with one extra
+// byte for the NUL terminator) and an int <name>Len tracking its length.
+#define string_buffer(name, len)                                        \
+    char name[len + 1];                                                 \
+    int name##Len
+
+
+// Initialize a string_buffer
+#define string_buffer_initialize(sb)                                    \
+    do {                                                                \
+        sb[0] = 0;                                                      \
+        sb##Len = 0;                                                    \
+    } while (0)
+
+
+// Append [len] bytes of [str] to [sb], setting [all_fit] to 1 if it fit, and
+// 0 if it did not.
+// NOTE(review): the snprintf limit is sizeof(sb) - sb##Len - 1, even though
+// snprintf's size argument already accounts for the terminator; appends
+// therefore truncate one byte earlier than the declared capacity -- kept
+// as-is to match upstream libs3 behavior.
+#define string_buffer_append(sb, str, len, all_fit)                     \
+    do {                                                                \
+        sb##Len += snprintf(&(sb[sb##Len]), sizeof(sb) - sb##Len - 1,   \
+                            "%.*s", (int) (len), str);                  \
+        if (sb##Len > (int) (sizeof(sb) - 1)) {                         \
+            sb##Len = sizeof(sb) - 1;                                   \
+            all_fit = 0;                                                \
+        }                                                               \
+        else {                                                          \
+            all_fit = 1;                                                \
+        }                                                               \
+    } while (0)
+
+
+// Declare a string multibuffer with the given name of the given maximum
+// size.  A multibuffer packs several NUL-terminated strings back-to-back;
+// <name>Size is the total number of bytes used (including terminators).
+#define string_multibuffer(name, size)                                  \
+    char name[size];                                                    \
+    int name##Size
+
+
+// Initialize a string_multibuffer
+#define string_multibuffer_initialize(smb)                              \
+    do {                                                                \
+        smb##Size = 0;                                                  \
+    } while (0)
+
+
+// Evaluates to the current string within the string_multibuffer, i.e. the
+// position where the next string_multibuffer_add would begin writing
+#define string_multibuffer_current(smb)                                  \
+    &(smb[smb##Size])
+
+
+// Adds a new string to the string_multibuffer.  The +1 accounts for the
+// NUL terminator that snprintf writes after the copied bytes.
+#define string_multibuffer_add(smb, str, len, all_fit)                  \
+    do {                                                                \
+        smb##Size += (snprintf(&(smb[smb##Size]),                       \
+                               sizeof(smb) - smb##Size,                 \
+                               "%.*s", (int) (len), str) + 1);          \
+        if (smb##Size > (int) sizeof(smb)) {                            \
+            smb##Size = sizeof(smb);                                    \
+            all_fit = 0;                                                \
+        }                                                               \
+        else {                                                          \
+            all_fit = 1;                                                \
+        }                                                               \
+    } while (0)
+
+
+// Appends to the current string in the string_multibuffer.  There must be a
+// current string, meaning that string_multibuffer_add must have been called
+// at least once for this string_multibuffer.  (The decrement backs up over
+// the current string's NUL terminator so the add overwrites it.)
+#define string_multibuffer_append(smb, str, len, all_fit)               \
+    do {                                                                \
+        smb##Size--;                                                    \
+        string_multibuffer_add(smb, str, len, all_fit);                 \
+    } while (0)
+
+
+#endif /* STRING_BUFFER_H */

+ 90 - 0
libs/libs3/inc/util.h

@@ -0,0 +1,90 @@
+/** **************************************************************************
+ * util.h
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#ifndef UTIL_H
+#define UTIL_H
+
+#include <curl/curl.h>
+#include <curl/multi.h>
+#include <stdint.h>
+#include "libs3.h"
+
+// acl groups
+#define ACS_URL "http://acs.amazonaws.com/groups/"
+
+#define ACS_GROUP_ALL_USERS     ACS_URL "global/AllUsers"
+#define ACS_GROUP_AWS_USERS     ACS_URL "global/AuthenticatedUsers"
+#define ACS_GROUP_LOG_DELIVERY  ACS_URL "s3/LogDelivery"
+
+
+// Derived from S3 documentation
+
+// This is the maximum number of bytes needed in a "compacted meta header"
+// buffer, which is a buffer storing all of the compacted meta headers.
+#define COMPACTED_METADATA_BUFFER_SIZE \
+    (S3_MAX_METADATA_COUNT * sizeof(S3_METADATA_HEADER_NAME_PREFIX "n: v"))
+
+// Maximum url encoded key size; since every single character could require
+// URL encoding, it's 3 times the size of a key (since each url encoded
+// character takes 3 characters: %NN)
+#define MAX_URLENCODED_KEY_SIZE (3 * S3_MAX_KEY_SIZE)
+
+// This is the maximum size of a URI that could be passed to S3:
+// https://s3.amazonaws.com/${BUCKET}/${KEY}?acl
+// 255 is the maximum bucket length
+#define MAX_URI_SIZE \
+    ((sizeof("https:///") - 1) + S3_MAX_HOSTNAME_SIZE + 255 + 1 +       \
+     MAX_URLENCODED_KEY_SIZE + (sizeof("?torrent") - 1) + 1)
+
+// Maximum size of a canonicalized resource
+#define MAX_CANONICALIZED_RESOURCE_SIZE \
+    (1 + 255 + 1 + MAX_URLENCODED_KEY_SIZE + (sizeof("?torrent") - 1) + 1)
+
+#define MAX_ACCESS_KEY_ID_LENGTH 32
+
+// Maximum length of a credential string
+// <access key>/<yyyymmdd>/<region>/s3/aws4_request
+// (8 = yyyymmdd, 32 = presumed maximum region name length -- confirm)
+// Fully parenthesized so the macro expands safely in any expression
+// context (the original expansion ended in bare "+ x + y" terms).
+#define MAX_CREDENTIAL_SIZE \
+   ((MAX_ACCESS_KEY_ID_LENGTH + 1) + 8 + 1 + 32 + \
+    sizeof("/s3/aws4_request"))
+
+// Utilities -----------------------------------------------------------------
+
+// URL-encodes a string from [src] into [dest].  [dest] must have at least
+// 3x the number of characters that [source] has.   At most [maxSrcSize] bytes
+// from [src] are encoded; if more are present in [src], 0 is returned from
+// urlEncode, else nonzero is returned.
+int urlEncode(char *dest, const char *src, int maxSrcSize, int encodeSlash);
+
+// Returns < 0 on failure >= 0 on success
+int64_t parseIso8601Time(const char *str);
+
+// Parses a run of decimal digits starting at [str] into an unsigned value
+uint64_t parseUnsignedInt(const char *str);
+
+// Because Windows seems to be missing isblank(), use our own; it's a very
+// easy function to write in any case
+int is_blank(char c);
+
+#endif /* UTIL_H */

+ 81 - 0
libs/libs3/libs3.spec

@@ -0,0 +1,81 @@
+Summary: C Library and Tools for Amazon S3 Access
+Name: libs3
+Version: trunk
+Release: 1
+License: LGPL
+Group: Networking/Utilities
+URL: http://sourceforge.net/projects/reallibs3
+Source0: libs3-trunk.tar.gz
+Buildroot: %{_tmppath}/%{name}-%{version}-%{release}-root
+# Want to include curl dependencies, but older Fedora Core uses curl-devel,
+# and newer Fedora Core uses libcurl-devel ... have to figure out how to
+# handle this problem, but for now, just don't check for any curl libraries
+# Buildrequires: curl-devel
+Buildrequires: libxml2-devel
+Buildrequires: openssl-devel
+Buildrequires: make
+# Requires: libcurl
+Requires: libxml2
+Requires: openssl
+
+%define debug_package %{nil}
+
+%description
+This package includes the libs3 shared object library, needed to run
+applications compiled against libs3, and additionally contains the s3
+utility for accessing Amazon S3.
+
+%package devel
+Summary: Headers and documentation for libs3
+Group: Development/Libraries
+Requires: %{name} = %{version}-%{release}
+
+%description devel
+This library provides an API for using Amazon's S3 service (see
+http://s3.amazonaws.com).  Its design goals are:
+
+ - To provide a simple and straightforward API for accessing all of S3's
+   functionality
+ - To not require the developer using libs3 to need to know anything about:
+     - HTTP
+     - XML
+     - SSL
+   In other words, this API is meant to stand on its own, without requiring
+   any implicit knowledge of how S3 services are accessed using HTTP
+   protocols.
+ - To be usable from multithreaded code
+ - To be usable by code which wants to process multiple S3 requests
+   simultaneously from a single thread
+ - To be usable in the simple, straightforward way using sequentialized
+   blocking requests
+
+
+%prep
+%setup -q
+
+%build
+BUILD=$RPM_BUILD_ROOT/build make exported
+
+%install
+BUILD=$RPM_BUILD_ROOT/build DESTDIR=$RPM_BUILD_ROOT/usr make install
+rm -rf $RPM_BUILD_ROOT/build
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root,-)
+/usr/bin/s3
+/usr/lib/libs3.so*
+
+%files devel
+%defattr(-,root,root,-)
+/usr/include/libs3.h
+/usr/lib/libs3.a
+
+%changelog
+* Sat Aug 09 2008  <bryan@ischo.com> Bryan Ischo
+- Split into regular and devel packages.
+
+* Tue Aug 05 2008  <bryan@ischo.com> Bryan Ischo
+- Initial build.

+ 27 - 0
libs/libs3/mswin/libs3.def

@@ -0,0 +1,27 @@
+; Module-definition file listing the symbols exported from the libs3
+; Windows DLL (kept in alphabetical order).
+EXPORTS
+S3_convert_acl
+S3_copy_object
+S3_create_bucket
+S3_create_request_context
+S3_deinitialize
+S3_delete_bucket
+S3_delete_object
+S3_destroy_request_context
+S3_generate_authenticated_query_string
+S3_get_acl
+S3_get_object
+S3_get_request_context_fdsets
+S3_get_server_access_logging
+S3_get_status_name
+S3_head_object
+S3_initialize
+S3_list_bucket
+S3_list_service
+S3_put_object
+S3_runall_request_context
+S3_runonce_request_context
+S3_set_acl
+S3_set_server_access_logging
+S3_status_is_retryable
+S3_test_bucket
+S3_validate_bucket_name

+ 9 - 0
libs/libs3/mswin/rmrf.bat

@@ -0,0 +1,9 @@
+@echo off
+
+rem Best-effort equivalent of Unix "rm -rf %1" for Windows builds: first
+rem try to remove %1 as a directory tree; if it still exists afterwards
+rem (i.e. it was a regular file, so rmdir failed), delete it as a file.
+if exist "%1". (
+   rmdir /S /Q "%1"
+)
+
+if exist "%1". (
+   del /Q "%1"
+)

+ 774 - 0
libs/libs3/src/bucket.c

@@ -0,0 +1,774 @@
+/** **************************************************************************
+ * bucket.c
+ *
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ *
+ * This file is part of libs3.
+ *
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <string.h>
+#include <stdlib.h>
+#include "libs3.h"
+#include "request.h"
+#include "simplexml.h"
+
+// test bucket ---------------------------------------------------------------
+
+// Per-request state for S3_test_bucket, allocated in S3_test_bucket and
+// freed in testBucketCompleteCallback.
+typedef struct TestBucketData
+{
+    // Parser for the <LocationConstraint> XML response body
+    SimpleXml simpleXml;
+
+    // The caller's handlers, forwarded to by the static callbacks below
+    S3ResponsePropertiesCallback *responsePropertiesCallback;
+    S3ResponseCompleteCallback *responseCompleteCallback;
+    void *callbackData;
+
+    // Caller-supplied buffer (and its size) that receives the location
+    // constraint string when the request completes
+    int locationConstraintReturnSize;
+    char *locationConstraintReturn;
+
+    // Accumulates LocationConstraint element data during the parse
+    string_buffer(locationConstraint, 256);
+} TestBucketData;
+
+
+// SimpleXml callback: accumulates the character data of the top-level
+// LocationConstraint element; all other elements (and end-of-element
+// events, where data == 0) are ignored.  Always returns S3StatusOK, so
+// overflow of the 256-byte buffer silently truncates.
+static S3Status testBucketXmlCallback(const char *elementPath,
+                                      const char *data, int dataLen,
+                                      void *callbackData)
+{
+    TestBucketData *tbData = (TestBucketData *) callbackData;
+
+    int fit;
+
+    if (data && !strcmp(elementPath, "LocationConstraint")) {
+        string_buffer_append(tbData->locationConstraint, data, dataLen, fit);
+    }
+
+    /* Avoid compiler error about variable set but not used */
+    (void) fit;
+
+    return S3StatusOK;
+}
+
+
+// Forwards response properties to the caller's properties callback.
+static S3Status testBucketPropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    TestBucketData *tbData = (TestBucketData *) callbackData;
+
+    return (*(tbData->responsePropertiesCallback))
+        (responseProperties, tbData->callbackData);
+}
+
+
+// Feeds each chunk of the response body into the XML parser; a non-OK
+// status from the parser (or its callback) aborts the transfer.
+static S3Status testBucketDataCallback(int bufferSize, const char *buffer,
+                                       void *callbackData)
+{
+    TestBucketData *tbData = (TestBucketData *) callbackData;
+
+    return simplexml_add(&(tbData->simpleXml), buffer, bufferSize);
+}
+
+
+// Copies the parsed location constraint into the caller's buffer
+// (NUL-terminated, truncated to locationConstraintReturnSize), forwards
+// the completion to the caller, then tears down the per-request state.
+// NOTE(review): assumes locationConstraintReturn is a valid buffer when
+// its size is nonzero -- confirm against S3_test_bucket's callers.
+static void testBucketCompleteCallback(S3Status requestStatus,
+                                       const S3ErrorDetails *s3ErrorDetails,
+                                       void *callbackData)
+{
+    TestBucketData *tbData = (TestBucketData *) callbackData;
+
+    // Copy the location constraint into the return buffer
+    snprintf(tbData->locationConstraintReturn,
+             tbData->locationConstraintReturnSize, "%s",
+             tbData->locationConstraint);
+
+    (*(tbData->responseCompleteCallback))
+        (requestStatus, s3ErrorDetails, tbData->callbackData);
+
+    simplexml_deinitialize(&(tbData->simpleXml));
+
+    free(tbData);
+}
+
+// Issues a GET on the bucket's "location" sub-resource, which both tests
+// access to [bucketName] and retrieves its location constraint (region).
+// The constraint is copied into [locationConstraintReturn] (at most
+// [locationConstraintReturnSize] bytes) on completion.  If [requestContext]
+// is non-NULL the request is queued on it rather than performed
+// immediately (presumably non-blocking -- see request_perform).
+void S3_test_bucket(S3Protocol protocol, S3UriStyle uriStyle,
+                    const char *accessKeyId, const char *secretAccessKey,
+                    const char *securityToken, const char *hostName,
+                    const char *bucketName, const char *authRegion,
+                    int locationConstraintReturnSize,
+                    char *locationConstraintReturn,
+                    S3RequestContext *requestContext,
+                    int timeoutMs,
+                    const S3ResponseHandler *handler, void *callbackData)
+{
+    // Create the callback data
+    TestBucketData *tbData =
+        (TestBucketData *) malloc(sizeof(TestBucketData));
+    if (!tbData) {
+        (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+        return;
+    }
+
+    simplexml_initialize(&(tbData->simpleXml), &testBucketXmlCallback, tbData);
+
+    tbData->responsePropertiesCallback = handler->propertiesCallback;
+    tbData->responseCompleteCallback = handler->completeCallback;
+    tbData->callbackData = callbackData;
+
+    tbData->locationConstraintReturnSize = locationConstraintReturnSize;
+    tbData->locationConstraintReturn = locationConstraintReturn;
+    string_buffer_initialize(tbData->locationConstraint);
+
+    // Set up the RequestParams
+    RequestParams params =
+    {
+        HttpRequestTypeGET,                           // httpRequestType
+        { hostName,                                   // hostName
+          bucketName,                                 // bucketName
+          protocol,                                   // protocol
+          uriStyle,                                   // uriStyle
+          accessKeyId,                                // accessKeyId
+          secretAccessKey,                            // secretAccessKey
+          securityToken,                              // securityToken
+          authRegion },                               // authRegion
+        0,                                            // key
+        0,                                            // queryParams
+        "location",                                   // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        0,                                            // putProperties
+        &testBucketPropertiesCallback,                // propertiesCallback
+        0,                                            // toS3Callback
+        0,                                            // toS3CallbackTotalSize
+        &testBucketDataCallback,                      // fromS3Callback
+        &testBucketCompleteCallback,                  // completeCallback
+        tbData,                                       // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+}
+
+
+// create bucket -------------------------------------------------------------
+
+// Per-request state for S3_create_bucket, allocated in S3_create_bucket
+// and freed in createBucketCompleteCallback.
+typedef struct CreateBucketData
+{
+    // The caller's handlers, forwarded to by the static callbacks below
+    S3ResponsePropertiesCallback *responsePropertiesCallback;
+    S3ResponseCompleteCallback *responseCompleteCallback;
+    void *callbackData;
+
+    // Optional CreateBucketConfiguration request body; docLen == 0 means
+    // no body, docBytesWritten tracks streaming progress
+    char doc[1024];
+    int docLen, docBytesWritten;
+} CreateBucketData;
+
+
+// Forwards response properties to the caller's properties callback.
+static S3Status createBucketPropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    CreateBucketData *cbData = (CreateBucketData *) callbackData;
+
+    return (*(cbData->responsePropertiesCallback))
+        (responseProperties, cbData->callbackData);
+}
+
+
+// Streams the CreateBucketConfiguration document to S3 in chunks of at
+// most [bufferSize] bytes.  Returns the number of bytes copied into
+// [buffer]; 0 signals end of data (or that no body was configured).
+static int createBucketDataCallback(int bufferSize, char *buffer,
+                                    void *callbackData)
+{
+    CreateBucketData *cbData = (CreateBucketData *) callbackData;
+
+    if (!cbData->docLen) {
+        return 0;
+    }
+
+    int remaining = (cbData->docLen - cbData->docBytesWritten);
+
+    int toCopy = bufferSize > remaining ? remaining : bufferSize;
+
+    if (!toCopy) {
+        return 0;
+    }
+
+    memcpy(buffer, &(cbData->doc[cbData->docBytesWritten]), toCopy);
+
+    cbData->docBytesWritten += toCopy;
+
+    return toCopy;
+}
+
+
+// Forwards the completion to the caller and frees the per-request state.
+static void createBucketCompleteCallback(S3Status requestStatus,
+                                         const S3ErrorDetails *s3ErrorDetails,
+                                         void *callbackData)
+{
+    CreateBucketData *cbData = (CreateBucketData *) callbackData;
+
+    (*(cbData->responseCompleteCallback))
+        (requestStatus, s3ErrorDetails, cbData->callbackData);
+
+    free(cbData);
+}
+
+static S3Status createBucketFromS3Callback(int bufferSize, const char *buffer,
+                                           void *callbackData)
+{
+    // Sometimes S3 sends a response body. We silently ignore it.
+
+    (void)bufferSize;  // avoid unused parameter warning
+    (void)buffer;  // avoid unused parameter warning
+    (void)callbackData;  // avoid unused parameter warning
+
+    return S3StatusOK;
+}
+
+// Issues a PUT on [bucketName] to create the bucket, with an optional
+// <CreateBucketConfiguration> body carrying [locationConstraint].  Always
+// uses path-style URIs (a bucket that does not exist yet cannot be
+// addressed virtual-hosted style).  If [requestContext] is non-NULL the
+// request is queued on it rather than performed immediately.
+void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
+                      const char *secretAccessKey, const char *securityToken,
+                      const char *hostName, const char *bucketName,
+                      const char *authRegion, S3CannedAcl cannedAcl,
+                      const char *locationConstraint,
+                      S3RequestContext *requestContext,
+                      int timeoutMs,
+                      const S3ResponseHandler *handler, void *callbackData)
+{
+    // Create the callback data
+    CreateBucketData *cbData =
+        (CreateBucketData *) malloc(sizeof(CreateBucketData));
+    if (!cbData) {
+        (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+        return;
+    }
+
+    cbData->responsePropertiesCallback = handler->propertiesCallback;
+    cbData->responseCompleteCallback = handler->completeCallback;
+    cbData->callbackData = callbackData;
+
+    // Build the request body only when a location constraint was given;
+    // otherwise the PUT is sent with an empty body
+    if (locationConstraint) {
+        cbData->docLen =
+            snprintf(cbData->doc, sizeof(cbData->doc),
+                     "<CreateBucketConfiguration><LocationConstraint>"
+                     "%s</LocationConstraint></CreateBucketConfiguration>",
+                     locationConstraint);
+        cbData->docBytesWritten = 0;
+    }
+    else {
+        cbData->docLen = 0;
+    }
+
+    // Set up S3PutProperties
+    S3PutProperties properties =
+    {
+        0,                                       // contentType
+        0,                                       // md5
+        0,                                       // cacheControl
+        0,                                       // contentDispositionFilename
+        0,                                       // contentEncoding
+       -1,                                       // expires
+        cannedAcl,                               // cannedAcl
+        0,                                       // metaDataCount
+        0,                                       // metaData
+        0                                        // useServerSideEncryption
+    };
+
+    // Set up the RequestParams
+    RequestParams params =
+    {
+        HttpRequestTypePUT,                           // httpRequestType
+        { hostName,                                   // hostName
+          bucketName,                                 // bucketName
+          protocol,                                   // protocol
+          S3UriStylePath,                             // uriStyle
+          accessKeyId,                                // accessKeyId
+          secretAccessKey,                            // secretAccessKey
+          securityToken,                              // securityToken
+          authRegion },                               // authRegion
+        0,                                            // key
+        0,                                            // queryParams
+        0,                                            // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        &properties,                                  // putProperties
+        &createBucketPropertiesCallback,              // propertiesCallback
+        &createBucketDataCallback,                    // toS3Callback
+        cbData->docLen,                               // toS3CallbackTotalSize
+        createBucketFromS3Callback,                   // fromS3Callback
+        &createBucketCompleteCallback,                // completeCallback
+        cbData,                                       // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+}
+
+
+// delete bucket -------------------------------------------------------------
+
+// Per-request state for S3_delete_bucket, allocated in S3_delete_bucket
+// and freed in deleteBucketCompleteCallback.
+typedef struct DeleteBucketData
+{
+    // The caller's handlers, forwarded to by the static callbacks below
+    S3ResponsePropertiesCallback *responsePropertiesCallback;
+    S3ResponseCompleteCallback *responseCompleteCallback;
+    void *callbackData;
+} DeleteBucketData;
+
+
+// Forwards response properties to the caller's properties callback.
+static S3Status deleteBucketPropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    DeleteBucketData *dbData = (DeleteBucketData *) callbackData;
+
+    return (*(dbData->responsePropertiesCallback))
+        (responseProperties, dbData->callbackData);
+}
+
+
+// Forwards the completion to the caller and frees the per-request state.
+static void deleteBucketCompleteCallback(S3Status requestStatus,
+                                         const S3ErrorDetails *s3ErrorDetails,
+                                         void *callbackData)
+{
+    DeleteBucketData *dbData = (DeleteBucketData *) callbackData;
+
+    (*(dbData->responseCompleteCallback))
+        (requestStatus, s3ErrorDetails, dbData->callbackData);
+
+    free(dbData);
+}
+
+
+// Issues a DELETE on [bucketName] to remove the (empty) bucket.  If
+// [requestContext] is non-NULL the request is queued on it rather than
+// performed immediately.
+void S3_delete_bucket(S3Protocol protocol, S3UriStyle uriStyle,
+                      const char *accessKeyId, const char *secretAccessKey,
+                      const char *securityToken, const char *hostName,
+                      const char *bucketName, const char *authRegion,
+                      S3RequestContext *requestContext,
+                      int timeoutMs,
+                      const S3ResponseHandler *handler, void *callbackData)
+{
+    // Create the callback data
+    DeleteBucketData *dbData =
+        (DeleteBucketData *) malloc(sizeof(DeleteBucketData));
+    if (!dbData) {
+        (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+        return;
+    }
+
+    dbData->responsePropertiesCallback = handler->propertiesCallback;
+    dbData->responseCompleteCallback = handler->completeCallback;
+    dbData->callbackData = callbackData;
+
+    // Set up the RequestParams
+    RequestParams params =
+    {
+        HttpRequestTypeDELETE,                        // httpRequestType
+        { hostName,                                   // hostName
+          bucketName,                                 // bucketName
+          protocol,                                   // protocol
+          uriStyle,                                   // uriStyle
+          accessKeyId,                                // accessKeyId
+          secretAccessKey,                            // secretAccessKey
+          securityToken,                              // securityToken
+          authRegion },                               // authRegion
+        0,                                            // key
+        0,                                            // queryParams
+        0,                                            // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        0,                                            // putProperties
+        &deleteBucketPropertiesCallback,              // propertiesCallback
+        0,                                            // toS3Callback
+        0,                                            // toS3CallbackTotalSize
+        0,                                            // fromS3Callback
+        &deleteBucketCompleteCallback,                // completeCallback
+        dbData,                                       // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+}
+
+
+// list bucket ----------------------------------------------------------------
+
+// Raw (string-valued) fields of one <Contents> entry in a ListBucket
+// response, accumulated during XML parsing; converted to typed values in
+// make_list_bucket_callback.
+typedef struct ListBucketContents
+{
+    string_buffer(key, 1024);
+    string_buffer(lastModified, 256);
+    string_buffer(eTag, 256);
+    string_buffer(size, 24);
+    string_buffer(ownerId, 256);
+    string_buffer(ownerDisplayName, 256);
+} ListBucketContents;
+
+
+// Resets every string buffer of [contents] to empty.
+static void initialize_list_bucket_contents(ListBucketContents *contents)
+{
+    string_buffer_initialize(contents->key);
+    string_buffer_initialize(contents->lastModified);
+    string_buffer_initialize(contents->eTag);
+    string_buffer_initialize(contents->size);
+    string_buffer_initialize(contents->ownerId);
+    string_buffer_initialize(contents->ownerDisplayName);
+}
+
+// We read up to 32 Contents at a time
+#define MAX_CONTENTS 32
+// We read up to 8 CommonPrefixes at a time
+#define MAX_COMMON_PREFIXES 8
+
+// Per-request state for S3_list_bucket.  Contents and CommonPrefixes are
+// batched: up to MAX_CONTENTS / MAX_COMMON_PREFIXES entries are collected
+// before being flushed to the caller's list-bucket callback.
+typedef struct ListBucketData
+{
+    // Parser for the ListBucketResult XML response body
+    SimpleXml simpleXml;
+
+    // The caller's handlers, forwarded to by the static callbacks
+    S3ResponsePropertiesCallback *responsePropertiesCallback;
+    S3ListBucketCallback *listBucketCallback;
+    S3ResponseCompleteCallback *responseCompleteCallback;
+    void *callbackData;
+
+    // Raw string values of the IsTruncated and NextMarker elements
+    string_buffer(isTruncated, 64);
+    string_buffer(nextMarker, 1024);
+
+    // Current batch of <Contents> entries
+    int contentsCount;
+    ListBucketContents contents[MAX_CONTENTS];
+
+    // Current batch of <CommonPrefixes> strings and their lengths
+    int commonPrefixesCount;
+    char commonPrefixes[MAX_COMMON_PREFIXES][1024];
+    int commonPrefixLens[MAX_COMMON_PREFIXES];
+} ListBucketData;
+
+
+// Reset the batch accumulators, both at construction and after a flush to
+// the user callback.  Only slot 0 of each array is cleared here; later
+// slots are initialized on demand in listBucketXmlCallback as elements
+// complete.
+static void initialize_list_bucket_data(ListBucketData *lbData)
+{
+    lbData->contentsCount = 0;
+    initialize_list_bucket_contents(lbData->contents);
+    lbData->commonPrefixesCount = 0;
+    lbData->commonPrefixes[0][0] = 0;
+    lbData->commonPrefixLens[0] = 0;
+}
+
+
+// Convert the batched string-form results into the public
+// S3ListBucketContent / common-prefix arrays (stack VLAs sized by the
+// current batch) and invoke the user's list callback.  Returns whatever
+// status the user callback returns.
+static S3Status make_list_bucket_callback(ListBucketData *lbData)
+{
+    int i;
+
+    // Convert IsTruncated; S3 may emit "true"/"false" or "1"/"0"
+    int isTruncated = (!strcmp(lbData->isTruncated, "true") ||
+                       !strcmp(lbData->isTruncated, "1")) ? 1 : 0;
+
+    // Convert the contents
+    S3ListBucketContent contents[lbData->contentsCount];
+
+    int contentsCount = lbData->contentsCount;
+    for (i = 0; i < contentsCount; i++) {
+        S3ListBucketContent *contentDest = &(contents[i]);
+        ListBucketContents *contentSrc = &(lbData->contents[i]);
+        contentDest->key = contentSrc->key;
+        contentDest->lastModified =
+            parseIso8601Time(contentSrc->lastModified);
+        contentDest->eTag = contentSrc->eTag;
+        contentDest->size = parseUnsignedInt(contentSrc->size);
+        // Empty owner fields are reported to the caller as NULL
+        contentDest->ownerId =
+            contentSrc->ownerId[0] ?contentSrc->ownerId : 0;
+        contentDest->ownerDisplayName = (contentSrc->ownerDisplayName[0] ?
+                                         contentSrc->ownerDisplayName : 0);
+    }
+
+    // Make the common prefixes array
+    int commonPrefixesCount = lbData->commonPrefixesCount;
+    char *commonPrefixes[commonPrefixesCount];
+    for (i = 0; i < commonPrefixesCount; i++) {
+        commonPrefixes[i] = lbData->commonPrefixes[i];
+    }
+
+    return (*(lbData->listBucketCallback))
+        (isTruncated, lbData->nextMarker,
+         contentsCount, contents, commonPrefixesCount,
+         (const char **) commonPrefixes, lbData->callbackData);
+}
+
+
+// SimpleXML element callback for the ListBucketResult document.  Invoked
+// with data != 0 for element character data (possibly in several chunks),
+// and with data == 0 when the element at elementPath closes.  Completed
+// Contents / CommonPrefixes entries are batched; when a batch array fills,
+// it is flushed to the user callback and the accumulators are reset.
+// Returns a non-OK status to abort parsing.
+static S3Status listBucketXmlCallback(const char *elementPath,
+                                      const char *data, int dataLen,
+                                      void *callbackData)
+{
+    ListBucketData *lbData = (ListBucketData *) callbackData;
+
+    int fit;
+
+    if (data) {
+        if (!strcmp(elementPath, "ListBucketResult/IsTruncated")) {
+            string_buffer_append(lbData->isTruncated, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "ListBucketResult/NextMarker")) {
+            string_buffer_append(lbData->nextMarker, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "ListBucketResult/Contents/Key")) {
+            ListBucketContents *contents =
+                &(lbData->contents[lbData->contentsCount]);
+            string_buffer_append(contents->key, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListBucketResult/Contents/LastModified")) {
+            ListBucketContents *contents =
+                &(lbData->contents[lbData->contentsCount]);
+            string_buffer_append(contents->lastModified, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "ListBucketResult/Contents/ETag")) {
+            ListBucketContents *contents =
+                &(lbData->contents[lbData->contentsCount]);
+            string_buffer_append(contents->eTag, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "ListBucketResult/Contents/Size")) {
+            ListBucketContents *contents =
+                &(lbData->contents[lbData->contentsCount]);
+            string_buffer_append(contents->size, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "ListBucketResult/Contents/Owner/ID")) {
+            ListBucketContents *contents =
+                &(lbData->contents[lbData->contentsCount]);
+            string_buffer_append(contents->ownerId, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListBucketResult/Contents/Owner/DisplayName")) {
+            ListBucketContents *contents =
+                &(lbData->contents[lbData->contentsCount]);
+            string_buffer_append
+                (contents->ownerDisplayName, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListBucketResult/CommonPrefixes/Prefix")) {
+            // A prefix may arrive in several chunks; append into the
+            // current slot, tracking its length so truncation can be
+            // detected and reported as a parse failure.
+            int which = lbData->commonPrefixesCount;
+            size_t oldLen = lbData->commonPrefixLens[which];
+            lbData->commonPrefixLens[which] +=
+                snprintf(lbData->commonPrefixes[which]+oldLen,
+                         sizeof(lbData->commonPrefixes[which]) -
+                         oldLen - 1,
+                         "%.*s", dataLen, data);
+            if (lbData->commonPrefixLens[which] >=
+                (int) sizeof(lbData->commonPrefixes[which])) {
+                return S3StatusXmlParseFailure;
+            }
+        }
+    }
+    else {
+        // data == 0: an element just closed
+        if (!strcmp(elementPath, "ListBucketResult/Contents")) {
+            // Finished a Contents
+            lbData->contentsCount++;
+            if (lbData->contentsCount == MAX_CONTENTS) {
+                // Batch is full: flush to the user callback and reset
+                S3Status status = make_list_bucket_callback(lbData);
+                if (status != S3StatusOK) {
+                    return status;
+                }
+                initialize_list_bucket_data(lbData);
+            }
+            else {
+                // Initialize the next one
+                initialize_list_bucket_contents
+                    (&(lbData->contents[lbData->contentsCount]));
+            }
+        }
+        else if (!strcmp(elementPath,
+                         "ListBucketResult/CommonPrefixes/Prefix")) {
+            // Finished a Prefix
+            lbData->commonPrefixesCount++;
+            if (lbData->commonPrefixesCount == MAX_COMMON_PREFIXES) {
+                // Batch is full: flush to the user callback and reset
+                S3Status status = make_list_bucket_callback(lbData);
+                if (status != S3StatusOK) {
+                    return status;
+                }
+                initialize_list_bucket_data(lbData);
+            }
+            else {
+                // Initialize the next one
+                lbData->commonPrefixes[lbData->commonPrefixesCount][0] = 0;
+                lbData->commonPrefixLens[lbData->commonPrefixesCount] = 0;
+            }
+        }
+    }
+
+    /* Avoid compiler error about variable set but not used */
+    (void) fit;
+
+    return S3StatusOK;
+}
+
+
+// Forward response properties from the transport layer to the user's
+// properties callback, substituting the user's own callback data.
+static S3Status listBucketPropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    ListBucketData *state = (ListBucketData *) callbackData;
+    S3ResponsePropertiesCallback *userCallback =
+        state->responsePropertiesCallback;
+
+    return userCallback(responseProperties, state->callbackData);
+}
+
+
+// Stream response body bytes straight into the incremental XML parser.
+static S3Status listBucketDataCallback(int bufferSize, const char *buffer,
+                                       void *callbackData)
+{
+    ListBucketData *state = (ListBucketData *) callbackData;
+
+    return simplexml_add(&state->simpleXml, buffer, bufferSize);
+}
+
+
+// Request-complete hook: flush any partially filled batch to the user,
+// forward the final status, then tear down the XML parser and free the
+// per-request state.
+static void listBucketCompleteCallback(S3Status requestStatus,
+                                       const S3ErrorDetails *s3ErrorDetails,
+                                       void *callbackData)
+{
+    ListBucketData *lbData = (ListBucketData *) callbackData;
+
+    // Make the callback if there is anything
+    if (lbData->contentsCount || lbData->commonPrefixesCount) {
+        make_list_bucket_callback(lbData);
+    }
+
+    (*(lbData->responseCompleteCallback))
+        (requestStatus, s3ErrorDetails, lbData->callbackData);
+
+    simplexml_deinitialize(&(lbData->simpleXml));
+
+    free(lbData);
+}
+
+
+// Public entry point: list (one page of) the keys in a bucket via a GET
+// with prefix/marker/delimiter/max-keys query parameters.  Results are
+// delivered incrementally through handler->listBucketCallback; the final
+// status arrives via handler->responseHandler.completeCallback.
+void S3_list_bucket(const S3BucketContext *bucketContext, const char *prefix,
+                    const char *marker, const char *delimiter, int maxkeys,
+                    S3RequestContext *requestContext,
+                    int timeoutMs,
+                    const S3ListBucketHandler *handler, void *callbackData)
+{
+    // Compose the query params
+    string_buffer(queryParams, 4096);
+    string_buffer_initialize(queryParams);
+
+// Append "name=<url-encoded value>" (preceded by '&' after the first
+// parameter) to queryParams; on any overflow, report
+// S3StatusQueryParamsTooLong to the caller and return from this function.
+#define safe_append(name, value)                                        \
+    do {                                                                \
+        int fit;                                                        \
+        if (amp) {                                                      \
+            string_buffer_append(queryParams, "&", 1, fit);             \
+            if (!fit) {                                                 \
+                (*(handler->responseHandler.completeCallback))          \
+                    (S3StatusQueryParamsTooLong, 0, callbackData);      \
+                return;                                                 \
+            }                                                           \
+        }                                                               \
+        string_buffer_append(queryParams, name "=",                     \
+                             sizeof(name "=") - 1, fit);                \
+        if (!fit) {                                                     \
+            (*(handler->responseHandler.completeCallback))              \
+                (S3StatusQueryParamsTooLong, 0, callbackData);          \
+            return;                                                     \
+        }                                                               \
+        amp = 1;                                                        \
+        char encoded[3 * 1024];                                         \
+        if (!urlEncode(encoded, value, 1024, 1)) {                   \
+            (*(handler->responseHandler.completeCallback))              \
+                (S3StatusQueryParamsTooLong, 0, callbackData);          \
+            return;                                                     \
+        }                                                               \
+        string_buffer_append(queryParams, encoded, strlen(encoded),     \
+                             fit);                                      \
+        if (!fit) {                                                     \
+            (*(handler->responseHandler.completeCallback))              \
+                (S3StatusQueryParamsTooLong, 0, callbackData);          \
+            return;                                                     \
+        }                                                               \
+    } while (0)
+
+
+    int amp = 0;
+    if (prefix && *prefix) {
+        safe_append("prefix", prefix);
+    }
+    if (marker && *marker) {
+        safe_append("marker", marker);
+    }
+    if (delimiter && *delimiter) {
+        safe_append("delimiter", delimiter);
+    }
+    if (maxkeys) {
+        char maxKeysString[64];
+        snprintf(maxKeysString, sizeof(maxKeysString), "%d", maxkeys);
+        safe_append("max-keys", maxKeysString);
+    }
+
+    // Allocate the per-request state; freed by listBucketCompleteCallback
+    ListBucketData *lbData =
+        (ListBucketData *) malloc(sizeof(ListBucketData));
+
+    if (!lbData) {
+        (*(handler->responseHandler.completeCallback))
+            (S3StatusOutOfMemory, 0, callbackData);
+        return;
+    }
+
+    simplexml_initialize(&(lbData->simpleXml), &listBucketXmlCallback, lbData);
+
+    lbData->responsePropertiesCallback =
+        handler->responseHandler.propertiesCallback;
+    lbData->listBucketCallback = handler->listBucketCallback;
+    lbData->responseCompleteCallback =
+        handler->responseHandler.completeCallback;
+    lbData->callbackData = callbackData;
+
+    string_buffer_initialize(lbData->isTruncated);
+    string_buffer_initialize(lbData->nextMarker);
+    initialize_list_bucket_data(lbData);
+
+    // Set up the RequestParams
+    RequestParams params =
+    {
+        HttpRequestTypeGET,                           // httpRequestType
+        { bucketContext->hostName,                    // hostName
+          bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        0,                                            // key
+        queryParams[0] ? queryParams : 0,             // queryParams
+        0,                                            // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        0,                                            // putProperties
+        &listBucketPropertiesCallback,                // propertiesCallback
+        0,                                            // toS3Callback
+        0,                                            // toS3CallbackTotalSize
+        &listBucketDataCallback,                      // fromS3Callback
+        &listBucketCompleteCallback,                  // completeCallback
+        lbData,                                       // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+}

+ 602 - 0
libs/libs3/src/bucket_metadata.c

@@ -0,0 +1,602 @@
+/** **************************************************************************
+ * bucket_metadata.c
+ *
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ *
+ * This file is part of libs3.
+ *
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef __APPLE__
+    #include <openssl/md5.h>
+    #include <openssl/bio.h>
+    #include <openssl/evp.h>
+    #include <openssl/buffer.h>
+#endif
+
+#include "libs3.h"
+#include "request.h"
+
+// Use a rather arbitrary max size for the document of 64K
+#define ACL_XML_DOC_MAXSIZE (64 * 1024)
+
+
+// get acl -------------------------------------------------------------------
+
+// Callback state for one S3_get_acl request.  The full ACL XML document is
+// buffered here and converted to grants only when the request completes.
+typedef struct GetAclData
+{
+    // NOTE(review): simpleXml appears unused by the get-acl callbacks in
+    // this file (parsing is delegated to S3_convert_acl) -- confirm before
+    // removing.
+    SimpleXml simpleXml;
+
+    S3ResponsePropertiesCallback *responsePropertiesCallback;
+    S3ResponseCompleteCallback *responseCompleteCallback;
+    void *callbackData;   // opaque pointer passed through to user callbacks
+
+    int *aclGrantCountReturn;  // caller-owned: receives the grant count
+    S3AclGrant *aclGrants;     // caller-owned: receives the grants
+    char *ownerId;             // caller-owned: receives the owner ID
+    char *ownerDisplayName;    // caller-owned: receives the display name
+    string_buffer(xmlDocument, ACL_XML_DOC_MAXSIZE);  // buffered response
+} GetAclData;
+
+
+// Relay response properties to the user's callback with the user's data.
+static S3Status getAclPropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    GetAclData *state = (GetAclData *) callbackData;
+    S3ResponsePropertiesCallback *userCallback =
+        state->responsePropertiesCallback;
+
+    return userCallback(responseProperties, state->callbackData);
+}
+
+
+// Accumulate response body bytes into the buffered XML document; fails
+// with S3StatusXmlDocumentTooLarge once the ACL_XML_DOC_MAXSIZE buffer is
+// exhausted.
+static S3Status getAclDataCallback(int bufferSize, const char *buffer,
+                                   void *callbackData)
+{
+    GetAclData *gaData = (GetAclData *) callbackData;
+
+    int fit;
+
+    string_buffer_append(gaData->xmlDocument, buffer, bufferSize, fit);
+
+    return fit ? S3StatusOK : S3StatusXmlDocumentTooLarge;
+}
+
+
+// Request-complete hook: on success, parse the buffered ACL XML into the
+// caller's owner/grant output buffers via S3_convert_acl (its status
+// replaces the request status on parse failure); then forward the final
+// status and free the per-request state.
+static void getAclCompleteCallback(S3Status requestStatus,
+                                   const S3ErrorDetails *s3ErrorDetails,
+                                   void *callbackData)
+{
+    GetAclData *gaData = (GetAclData *) callbackData;
+
+    if (requestStatus == S3StatusOK) {
+        // Parse the document
+        requestStatus = S3_convert_acl
+            (gaData->xmlDocument, gaData->ownerId, gaData->ownerDisplayName,
+             gaData->aclGrantCountReturn, gaData->aclGrants);
+    }
+
+    (*(gaData->responseCompleteCallback))
+        (requestStatus, s3ErrorDetails, gaData->callbackData);
+
+    free(gaData);
+}
+
+
+// Public entry point: fetch the ACL of a bucket (key == 0) or an object
+// (key != 0) via GET on the "acl" subresource.  ownerId, ownerDisplayName,
+// aclGrantCountReturn and aclGrants are caller-owned output buffers,
+// filled in when the request completes.  *aclGrantCountReturn is zeroed up
+// front so it is meaningful even on failure.
+void S3_get_acl(const S3BucketContext *bucketContext, const char *key,
+                char *ownerId, char *ownerDisplayName,
+                int *aclGrantCountReturn, S3AclGrant *aclGrants,
+                S3RequestContext *requestContext,
+                int timeoutMs,
+                const S3ResponseHandler *handler, void *callbackData)
+{
+    // Create the callback data; freed by getAclCompleteCallback
+    GetAclData *gaData = (GetAclData *) malloc(sizeof(GetAclData));
+    if (!gaData) {
+        (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+        return;
+    }
+
+    gaData->responsePropertiesCallback = handler->propertiesCallback;
+    gaData->responseCompleteCallback = handler->completeCallback;
+    gaData->callbackData = callbackData;
+
+    gaData->aclGrantCountReturn = aclGrantCountReturn;
+    gaData->aclGrants = aclGrants;
+    gaData->ownerId = ownerId;
+    gaData->ownerDisplayName = ownerDisplayName;
+    string_buffer_initialize(gaData->xmlDocument);
+    *aclGrantCountReturn = 0;
+
+    // Set up the RequestParams
+    RequestParams params =
+    {
+        HttpRequestTypeGET,                           // httpRequestType
+        { bucketContext->hostName,                    // hostName
+          bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        key,                                          // key
+        0,                                            // queryParams
+        "acl",                                        // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        0,                                            // putProperties
+        &getAclPropertiesCallback,                    // propertiesCallback
+        0,                                            // toS3Callback
+        0,                                            // toS3CallbackTotalSize
+        &getAclDataCallback,                          // fromS3Callback
+        &getAclCompleteCallback,                      // completeCallback
+        gaData,                                       // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+}
+
+
+// set acl -------------------------------------------------------------------
+
+// Render owner + grants as an <AccessControlPolicy> XML document into
+// xmlDocument (capacity xmlDocumentBufferSize).  On success the document
+// length is stored in *xmlDocumentLenReturn and S3StatusOK is returned;
+// if the buffer is too small, S3StatusXmlDocumentTooLarge.
+// NOTE(review): grantee strings (IDs, display names, e-mail addresses)
+// are interpolated without XML escaping -- values containing '<' or '&'
+// would corrupt the document.
+static S3Status generateAclXmlDocument(const char *ownerId,
+                                       const char *ownerDisplayName,
+                                       int aclGrantCount,
+                                       const S3AclGrant *aclGrants,
+                                       int *xmlDocumentLenReturn,
+                                       char *xmlDocument,
+                                       int xmlDocumentBufferSize)
+{
+    *xmlDocumentLenReturn = 0;
+
+// Append formatted text to the document, bailing out of the enclosing
+// function on overflow (snprintf's return value counts truncated bytes,
+// so the post-check catches truncation).
+#define append(fmt, ...)                                        \
+    do {                                                        \
+        *xmlDocumentLenReturn += snprintf                       \
+            (&(xmlDocument[*xmlDocumentLenReturn]),             \
+             xmlDocumentBufferSize - *xmlDocumentLenReturn - 1, \
+             fmt, __VA_ARGS__);                                 \
+        if (*xmlDocumentLenReturn >= xmlDocumentBufferSize) {   \
+            return S3StatusXmlDocumentTooLarge;                 \
+        } \
+    } while (0)
+
+    append("<AccessControlPolicy><Owner><ID>%s</ID><DisplayName>%s"
+           "</DisplayName></Owner><AccessControlList>", ownerId,
+           ownerDisplayName);
+
+    int i;
+    for (i = 0; i < aclGrantCount; i++) {
+        append("%s", "<Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/"
+               "XMLSchema-instance\" xsi:type=\"");
+        const S3AclGrant *grant = &(aclGrants[i]);
+        switch (grant->granteeType) {
+        case S3GranteeTypeAmazonCustomerByEmail:
+            append("AmazonCustomerByEmail\"><EmailAddress>%s</EmailAddress>",
+                   grant->grantee.amazonCustomerByEmail.emailAddress);
+            break;
+        case S3GranteeTypeCanonicalUser:
+            append("CanonicalUser\"><ID>%s</ID><DisplayName>%s</DisplayName>",
+                   grant->grantee.canonicalUser.id,
+                   grant->grantee.canonicalUser.displayName);
+            break;
+        default: { // case S3GranteeTypeAllAwsUsers/S3GranteeTypeAllUsers:
+            const char *grantee;
+            switch (grant->granteeType) {
+            case S3GranteeTypeAllAwsUsers:
+                grantee = ACS_GROUP_AWS_USERS;
+                break;
+            case S3GranteeTypeAllUsers:
+                grantee = ACS_GROUP_ALL_USERS;
+                break;
+            default:
+                grantee = ACS_GROUP_LOG_DELIVERY;
+                break;
+            }
+            append("Group\"><URI>%s</URI>", grantee);
+        }
+            break;
+        }
+        append("</Grantee><Permission>%s</Permission></Grant>",
+               ((grant->permission == S3PermissionRead) ? "READ" :
+                (grant->permission == S3PermissionWrite) ? "WRITE" :
+                (grant->permission == S3PermissionReadACP) ? "READ_ACP" :
+                (grant->permission == S3PermissionWriteACP) ? "WRITE_ACP" :
+                "FULL_CONTROL"));
+    }
+
+    append("%s", "</AccessControlList></AccessControlPolicy>");
+
+    return S3StatusOK;
+}
+
+
+// Callback state for PUT requests that upload a pre-rendered XML document
+// (ACL, lifecycle).
+typedef struct SetXmlData
+{
+    S3ResponsePropertiesCallback *responsePropertiesCallback;
+    S3ResponseCompleteCallback *responseCompleteCallback;
+    void *callbackData;   // opaque pointer passed through to user callbacks
+
+    int xmlDocumentLen;           // total bytes of the document to send
+    // NOTE(review): xmlDocument is not owned by this struct; the caller
+    // must ensure it stays valid until the request completes.
+    const char *xmlDocument;
+    int xmlDocumentBytesWritten;  // bytes already handed to the transport
+
+} SetXmlData;
+
+
+// Relay response properties to the user's callback with the user's data.
+static S3Status setXmlPropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    SetXmlData *state = (SetXmlData *) callbackData;
+
+    return state->responsePropertiesCallback
+        (responseProperties, state->callbackData);
+}
+
+
+// Copy the next chunk of the XML document into the transport's buffer.
+// Returns the number of bytes written; 0 signals end of document.
+static int setXmlDataCallback(int bufferSize, char *buffer, void *callbackData)
+{
+    SetXmlData *state = (SetXmlData *) callbackData;
+
+    // Bytes of the document still waiting to be sent
+    int pending = state->xmlDocumentLen - state->xmlDocumentBytesWritten;
+    if (pending <= 0) {
+        return 0;
+    }
+
+    int chunk = (pending < bufferSize) ? pending : bufferSize;
+    memcpy(buffer, state->xmlDocument + state->xmlDocumentBytesWritten,
+           chunk);
+    state->xmlDocumentBytesWritten += chunk;
+
+    return chunk;
+}
+
+
+// Request-complete hook: forward the final status to the user's callback
+// and free the per-request state.
+static void setXmlCompleteCallback(S3Status requestStatus,
+                                   const S3ErrorDetails *s3ErrorDetails,
+                                   void *callbackData)
+{
+    SetXmlData *paData = (SetXmlData *) callbackData;
+
+    (*(paData->responseCompleteCallback))
+        (requestStatus, s3ErrorDetails, paData->callbackData);
+
+    free(paData);
+}
+
+
+// Public entry point: set the ACL of a bucket (key == 0) or an object
+// (key != 0) by PUTting an XML AccessControlPolicy on the "acl"
+// subresource.
+//
+// Fix: the XML document is now co-allocated with the SetXmlData heap
+// block so that it remains valid for the whole request.  The previous
+// code pointed SetXmlData::xmlDocument at the local stack array
+// `aclBuffer`, which dangles as soon as this function returns when the
+// request is executed asynchronously through a non-NULL requestContext.
+// A single free() in setXmlCompleteCallback still releases everything.
+void S3_set_acl(const S3BucketContext *bucketContext, const char *key,
+                const char *ownerId, const char *ownerDisplayName,
+                int aclGrantCount, const S3AclGrant *aclGrants,
+                S3RequestContext *requestContext,
+                int timeoutMs,
+                const S3ResponseHandler *handler, void *callbackData)
+{
+    char aclBuffer[ACL_XML_DOC_MAXSIZE];
+
+    if (aclGrantCount > S3_MAX_ACL_GRANT_COUNT) {
+        (*(handler->completeCallback))
+            (S3StatusTooManyGrants, 0, callbackData);
+        return;
+    }
+
+    // Convert aclGrants to an XML document, first into a stack buffer
+    int aclXmlLen = 0;
+    S3Status status = generateAclXmlDocument
+        (ownerId, ownerDisplayName, aclGrantCount, aclGrants,
+         &aclXmlLen, aclBuffer, sizeof(aclBuffer));
+    if (status != S3StatusOK) {
+        (*(handler->completeCallback))(status, 0, callbackData);
+        return;
+    }
+
+    // Allocate the callback state together with a copy of the document
+    SetXmlData *data =
+        (SetXmlData *) malloc(sizeof(SetXmlData) + aclXmlLen + 1);
+    if (!data) {
+        (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+        return;
+    }
+
+    // The document lives immediately after the struct in the same block
+    char *xmlCopy = (char *) (data + 1);
+    memcpy(xmlCopy, aclBuffer, aclXmlLen);
+    xmlCopy[aclXmlLen] = 0;
+
+    data->xmlDocument = xmlCopy;
+    data->xmlDocumentLen = aclXmlLen;
+
+    data->responsePropertiesCallback = handler->propertiesCallback;
+    data->responseCompleteCallback = handler->completeCallback;
+    data->callbackData = callbackData;
+
+    data->xmlDocumentBytesWritten = 0;
+
+    // Set up the RequestParams
+    RequestParams params =
+    {
+        HttpRequestTypePUT,                           // httpRequestType
+        { bucketContext->hostName,                    // hostName
+          bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        key,                                          // key
+        0,                                            // queryParams
+        "acl",                                        // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        0,                                            // putProperties
+        &setXmlPropertiesCallback,                    // propertiesCallback
+        &setXmlDataCallback,                          // toS3Callback
+        data->xmlDocumentLen,                         // toS3CallbackTotalSize
+        0,                                            // fromS3Callback
+        &setXmlCompleteCallback,                      // completeCallback
+        data,                                         // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+}
+
+
+// get lifecycle -------------------------------------------------------------------
+
+// Callback state for one S3_get_lifecycle request: the lifecycle XML is
+// copied directly into a caller-supplied buffer as it arrives.
+typedef struct GetLifecycleData
+{
+    S3ResponsePropertiesCallback *responsePropertiesCallback;
+    S3ResponseCompleteCallback *responseCompleteCallback;
+    void *callbackData;   // opaque pointer passed through to user callbacks
+
+    char *lifecycleXmlDocumentReturn;    // caller-owned output buffer
+    int lifecycleXmlDocumentBufferSize;  // total capacity of that buffer
+    int lifecycleXmlDocumentWritten;     // bytes written so far
+} GetLifecycleData;
+
+
+// Relay response properties to the user's callback with the user's data.
+static S3Status getLifecyclePropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    GetLifecycleData *gaData = (GetLifecycleData *) callbackData;
+
+    return (*(gaData->responsePropertiesCallback))
+        (responseProperties, gaData->callbackData);
+}
+
+
+// Append a chunk of the lifecycle XML response to the caller's buffer,
+// keeping it NUL-terminated.  Returns S3StatusXmlDocumentTooLarge when
+// the chunk (plus the terminating NUL) would not fit.
+//
+// Fix: copy with memcpy instead of snprintf("%s").  The incoming buffer
+// is a raw HTTP body chunk and is not NUL-terminated, so snprintf could
+// read past its end, and it would also truncate at any embedded NUL.
+static S3Status getLifecycleDataCallback(int bufferSize, const char *buffer,
+                                   void *callbackData)
+{
+    GetLifecycleData *gaData = (GetLifecycleData *) callbackData;
+
+    // Reserve one byte for the terminating NUL
+    if ((gaData->lifecycleXmlDocumentWritten + bufferSize) >= gaData->lifecycleXmlDocumentBufferSize)
+        return S3StatusXmlDocumentTooLarge;
+
+    memcpy(gaData->lifecycleXmlDocumentReturn + gaData->lifecycleXmlDocumentWritten,
+           buffer, bufferSize);
+    gaData->lifecycleXmlDocumentWritten += bufferSize;
+    gaData->lifecycleXmlDocumentReturn[gaData->lifecycleXmlDocumentWritten] = 0;
+
+    return S3StatusOK;
+}
+
+
+// Request-complete hook: forward the final status to the user's callback
+// and free the per-request state.
+static void getLifecycleCompleteCallback(S3Status requestStatus,
+                                         const S3ErrorDetails *s3ErrorDetails,
+                                         void *callbackData)
+{
+    GetLifecycleData *gaData = (GetLifecycleData *) callbackData;
+
+    (*(gaData->responseCompleteCallback))
+        (requestStatus, s3ErrorDetails, gaData->callbackData);
+
+    free(gaData);
+}
+
+
+// Public entry point: fetch the bucket's lifecycle configuration XML via
+// GET on the "lifecycle" subresource.  The raw document is written into
+// the caller-owned lifecycleXmlDocumentReturn buffer (capacity
+// lifecycleXmlDocumentBufferSize, including room for a terminating NUL).
+void S3_get_lifecycle(const S3BucketContext *bucketContext,
+                      char *lifecycleXmlDocumentReturn, int lifecycleXmlDocumentBufferSize,
+                      S3RequestContext *requestContext,
+                      int timeoutMs,
+                      const S3ResponseHandler *handler, void *callbackData)
+{
+    // Create the callback data; freed by getLifecycleCompleteCallback
+    GetLifecycleData *gaData = (GetLifecycleData *) malloc(sizeof(GetLifecycleData));
+    if (!gaData) {
+        (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+        return;
+    }
+
+    gaData->responsePropertiesCallback = handler->propertiesCallback;
+    gaData->responseCompleteCallback = handler->completeCallback;
+    gaData->callbackData = callbackData;
+
+    gaData->lifecycleXmlDocumentReturn = lifecycleXmlDocumentReturn;
+    gaData->lifecycleXmlDocumentBufferSize = lifecycleXmlDocumentBufferSize;
+    gaData->lifecycleXmlDocumentWritten = 0;
+
+    // Set up the RequestParams
+    RequestParams params =
+    {
+        HttpRequestTypeGET,                           // httpRequestType
+        { bucketContext->hostName,                    // hostName
+          bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        0,                                            // key
+        0,                                            // queryParams
+        "lifecycle",                                  // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        0,                                            // putProperties
+        &getLifecyclePropertiesCallback,              // propertiesCallback
+        0,                                            // toS3Callback
+        0,                                            // toS3CallbackTotalSize
+        &getLifecycleDataCallback,                    // fromS3Callback
+        &getLifecycleCompleteCallback,                // completeCallback
+        gaData,                                       // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+}
+
+
+#ifndef __APPLE__
+// Calculate MD5 and encode it as base64
+void generate_content_md5(const char* data, int size,
+                          char* retBuffer, int retBufferSize) {
+    MD5_CTX mdContext;
+    BIO *bio, *b64;
+    BUF_MEM *bufferPtr;
+
+    char md5Buffer[MD5_DIGEST_LENGTH];
+
+    MD5_Init(&mdContext);
+    MD5_Update(&mdContext, data, size);
+    MD5_Final((unsigned char*)md5Buffer, &mdContext);
+
+
+    b64 = BIO_new(BIO_f_base64());
+    bio = BIO_new(BIO_s_mem());
+    bio = BIO_push(b64, bio);
+
+    BIO_set_flags(bio, BIO_FLAGS_BASE64_NO_NL); //Ignore newlines - write everything in one line
+    BIO_write(bio, md5Buffer, sizeof(md5Buffer));
+    (void) BIO_flush(bio);
+    BIO_get_mem_ptr(bio, &bufferPtr);
+    (void) BIO_set_close(bio, BIO_NOCLOSE);
+
+    if ((unsigned int)retBufferSize + 1 < bufferPtr->length) {
+        retBuffer[0] = '\0';
+        BIO_free_all(bio);
+        return;
+    }
+
+    memcpy(retBuffer, bufferPtr->data, bufferPtr->length);
+    retBuffer[bufferPtr->length] = '\0';
+
+    BIO_free_all(bio);
+}
+#endif
+
+
+void S3_set_lifecycle(const S3BucketContext *bucketContext,
+                      const char *lifecycleXmlDocument,
+                      S3RequestContext *requestContext,
+                      int timeoutMs,
+                      const S3ResponseHandler *handler, void *callbackData)
+{
+#ifdef __APPLE__
+    /* This request requires calculating MD5 sum.
+     * MD5 sum requires OpenSSL library, which is not used on Apple.
+     * TODO Implement some MD5+Base64 caculation on Apple
+     */
+    (*(handler->completeCallback))(S3StatusNotSupported, 0, callbackData);
+    return;
+#else
+    char md5Base64[MD5_DIGEST_LENGTH * 2];
+
+    SetXmlData *data = (SetXmlData *) malloc(sizeof(SetXmlData));
+    if (!data) {
+        (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+        return;
+    }
+
+
+    data->xmlDocument = lifecycleXmlDocument;
+    data->xmlDocumentLen = strlen(lifecycleXmlDocument);
+
+    data->responsePropertiesCallback = handler->propertiesCallback;
+    data->responseCompleteCallback = handler->completeCallback;
+    data->callbackData = callbackData;
+
+    data->xmlDocumentBytesWritten = 0;
+
+    generate_content_md5(data->xmlDocument, data->xmlDocumentLen,
+                         md5Base64, sizeof (md5Base64));
+
+    // Set up S3PutProperties
+    S3PutProperties properties =
+    {
+        0,                                       // contentType
+        md5Base64,                               // md5
+        0,                                       // cacheControl
+        0,                                       // contentDispositionFilename
+        0,                                       // contentEncoding
+       -1,                                       // expires
+        0,                                       // cannedAcl
+        0,                                       // metaDataCount
+        0,                                       // metaData
+        0                                        // useServerSideEncryption
+    };
+
+    // Set up the RequestParams
+    RequestParams params =
+    {
+        HttpRequestTypePUT,                           // httpRequestType
+        { bucketContext->hostName,                    // hostName
+          bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        0,                                            // key
+        0,                                            // queryParams
+        "lifecycle",                                  // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        &properties,                                  // putProperties
+        &setXmlPropertiesCallback,                    // propertiesCallback
+        &setXmlDataCallback,                          // toS3Callback
+        data->xmlDocumentLen,                         // toS3CallbackTotalSize
+        0,                                            // fromS3Callback
+        &setXmlCompleteCallback,                      // completeCallback
+        data,                                         // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+#endif
+}
+

+ 255 - 0
libs/libs3/src/error_parser.c

@@ -0,0 +1,255 @@
+/** **************************************************************************
+ * error_parser.c
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <string.h>
+#include "error_parser.h"
+
+
+static S3Status errorXmlCallback(const char *elementPath, const char *data,
+                                 int dataLen, void *callbackData)
+{
+    // We ignore end of element callbacks because we don't care about them
+    if (!data) {
+        return S3StatusOK;
+    }
+
+    ErrorParser *errorParser = (ErrorParser *) callbackData;
+
+    int fit;
+
+    if (!strcmp(elementPath, "Error")) {
+        // Ignore, this is the Error element itself, we only care about subs
+    }
+    else if (!strcmp(elementPath, "Error/Code")) {
+        string_buffer_append(errorParser->code, data, dataLen, fit);
+    }
+    else if (!strcmp(elementPath, "Error/Message")) {
+        string_buffer_append(errorParser->message, data, dataLen, fit);
+        errorParser->s3ErrorDetails.message = errorParser->message;
+    }
+    else if (!strcmp(elementPath, "Error/Resource")) {
+        string_buffer_append(errorParser->resource, data, dataLen, fit);
+        errorParser->s3ErrorDetails.resource = errorParser->resource;
+    }
+    else if (!strcmp(elementPath, "Error/FurtherDetails")) {
+        string_buffer_append(errorParser->furtherDetails, data, dataLen, fit);
+        errorParser->s3ErrorDetails.furtherDetails = 
+            errorParser->furtherDetails;
+    }
+    else {
+        if (strncmp(elementPath, "Error/", sizeof("Error/") - 1)) {
+            // If for some weird reason it's not within the Error element,
+            // ignore it
+            return S3StatusOK;
+        }
+        // It's an unknown error element.  See if it matches the most
+        // recent error element.
+        const char *elementName = &(elementPath[sizeof("Error/") - 1]);
+        if (errorParser->s3ErrorDetails.extraDetailsCount && 
+            !strcmp(elementName, errorParser->s3ErrorDetails.extraDetails
+                    [errorParser->s3ErrorDetails.extraDetailsCount - 1].name)) {
+            // Append the value
+            string_multibuffer_append(errorParser->extraDetailsNamesValues,
+                                      data, dataLen, fit);
+            // If it didn't fit, remove this extra
+            if (!fit) {
+                errorParser->s3ErrorDetails.extraDetailsCount--;
+            }
+            return S3StatusOK;
+        }
+        // OK, must add another unknown error element, if it will fit.
+        if (errorParser->s3ErrorDetails.extraDetailsCount ==
+            sizeof(errorParser->extraDetails)) {
+            // Won't fit.  Ignore this one.
+            return S3StatusOK;
+        }
+        // Copy in the name and value
+        char *name = string_multibuffer_current
+            (errorParser->extraDetailsNamesValues);
+        int nameLen = strlen(elementName);
+        string_multibuffer_add(errorParser->extraDetailsNamesValues,
+                               elementName, nameLen, fit);
+        if (!fit) {
+            // Name didn't fit; ignore this one.
+            return S3StatusOK;
+        }
+        char *value = string_multibuffer_current
+            (errorParser->extraDetailsNamesValues);
+        string_multibuffer_add(errorParser->extraDetailsNamesValues,
+                               data, dataLen, fit);
+        if (!fit) {
+            // Value didn't fit; ignore this one.
+            return S3StatusOK;
+        }
+        S3NameValue *nv = 
+            &(errorParser->extraDetails
+              [errorParser->s3ErrorDetails.extraDetailsCount++]);
+        nv->name = name;
+        nv->value = value;
+    }
+
+    return S3StatusOK;
+}
+
+
+void error_parser_initialize(ErrorParser *errorParser)
+{
+    errorParser->s3ErrorDetails.message = 0;
+    errorParser->s3ErrorDetails.resource = 0;
+    errorParser->s3ErrorDetails.furtherDetails = 0;
+    errorParser->s3ErrorDetails.extraDetailsCount = 0;
+    errorParser->s3ErrorDetails.extraDetails = errorParser->extraDetails;
+    errorParser->errorXmlParserInitialized = 0;
+    string_buffer_initialize(errorParser->code);
+    string_buffer_initialize(errorParser->message);
+    string_buffer_initialize(errorParser->resource);
+    string_buffer_initialize(errorParser->furtherDetails);
+    string_multibuffer_initialize(errorParser->extraDetailsNamesValues);
+}
+
+
+S3Status error_parser_add(ErrorParser *errorParser, char *buffer,
+                          int bufferSize)
+{
+    if (!errorParser->errorXmlParserInitialized) {
+        simplexml_initialize(&(errorParser->errorXmlParser), &errorXmlCallback,
+                             errorParser);
+        errorParser->errorXmlParserInitialized = 1;
+    }
+
+    return simplexml_add(&(errorParser->errorXmlParser), buffer, bufferSize);
+}
+
+
// If the parsed <Error><Code> string names a known S3 error, overwrite
// *status with the corresponding S3StatusError* enumerator; an unrecognized
// non-empty code yields S3StatusErrorUnknown.  If no Code element was
// parsed, *status is left untouched.
void error_parser_convert_status(ErrorParser *errorParser, S3Status *status)
{
    // Convert the error status string into a code
    if (!errorParser->codeLen) {
        return;
    }

// Expands to: if the code string equals "name", set *status to
// S3StatusError##name and stop comparing.
#define HANDLE_CODE(name)                                       \
    do {                                                        \
        if (!strcmp(errorParser->code, #name)) {                \
            *status = S3StatusError##name;                      \
            goto code_set;                                      \
        }                                                       \
    } while (0)

    HANDLE_CODE(AccessDenied);
    HANDLE_CODE(AccountProblem);
    HANDLE_CODE(AmbiguousGrantByEmailAddress);
    HANDLE_CODE(BadDigest);
    HANDLE_CODE(BucketAlreadyExists);
    HANDLE_CODE(BucketAlreadyOwnedByYou);
    HANDLE_CODE(BucketNotEmpty);
    HANDLE_CODE(CredentialsNotSupported);
    HANDLE_CODE(CrossLocationLoggingProhibited);
    HANDLE_CODE(EntityTooSmall);
    HANDLE_CODE(EntityTooLarge);
    HANDLE_CODE(ExpiredToken);
    HANDLE_CODE(IllegalVersioningConfigurationException);
    HANDLE_CODE(IncompleteBody);
    HANDLE_CODE(IncorrectNumberOfFilesInPostRequest);
    HANDLE_CODE(InlineDataTooLarge);
    HANDLE_CODE(InternalError);
    HANDLE_CODE(InvalidAccessKeyId);
    HANDLE_CODE(InvalidAddressingHeader);
    HANDLE_CODE(InvalidArgument);
    HANDLE_CODE(InvalidBucketName);
    HANDLE_CODE(InvalidBucketState);
    HANDLE_CODE(InvalidDigest);
    HANDLE_CODE(InvalidEncryptionAlgorithmError);
    HANDLE_CODE(InvalidLocationConstraint);
    HANDLE_CODE(InvalidObjectState);
    HANDLE_CODE(InvalidPart);
    HANDLE_CODE(InvalidPartOrder);
    HANDLE_CODE(InvalidPayer);
    HANDLE_CODE(InvalidPolicyDocument);
    HANDLE_CODE(InvalidRange);
    HANDLE_CODE(InvalidRequest);
    HANDLE_CODE(InvalidSecurity);
    HANDLE_CODE(InvalidSOAPRequest);
    HANDLE_CODE(InvalidStorageClass);
    HANDLE_CODE(InvalidTargetBucketForLogging);
    HANDLE_CODE(InvalidToken);
    HANDLE_CODE(InvalidURI);
    HANDLE_CODE(KeyTooLong);
    HANDLE_CODE(MalformedACLError);
    HANDLE_CODE(MalformedPOSTRequest);
    HANDLE_CODE(MalformedXML);
    HANDLE_CODE(MaxMessageLengthExceeded);
    HANDLE_CODE(MaxPostPreDataLengthExceededError);
    HANDLE_CODE(MetadataTooLarge);
    HANDLE_CODE(MethodNotAllowed);
    HANDLE_CODE(MissingAttachment);
    HANDLE_CODE(MissingContentLength);
    HANDLE_CODE(MissingRequestBodyError);
    HANDLE_CODE(MissingSecurityElement);
    HANDLE_CODE(MissingSecurityHeader);
    HANDLE_CODE(NoLoggingStatusForKey);
    HANDLE_CODE(NoSuchBucket);
    HANDLE_CODE(NoSuchKey);
    HANDLE_CODE(NoSuchLifecycleConfiguration);
    HANDLE_CODE(NoSuchUpload);
    HANDLE_CODE(NoSuchVersion);
    HANDLE_CODE(NotImplemented);
    HANDLE_CODE(NotSignedUp);
    HANDLE_CODE(NoSuchBucketPolicy);
    HANDLE_CODE(OperationAborted);
    HANDLE_CODE(PermanentRedirect);
    HANDLE_CODE(PreconditionFailed);
    HANDLE_CODE(Redirect);
    HANDLE_CODE(RestoreAlreadyInProgress);
    HANDLE_CODE(RequestIsNotMultiPartContent);
    HANDLE_CODE(RequestTimeout);
    HANDLE_CODE(RequestTimeTooSkewed);
    HANDLE_CODE(RequestTorrentOfBucketError);
    HANDLE_CODE(SignatureDoesNotMatch);
    HANDLE_CODE(ServiceUnavailable);
    HANDLE_CODE(SlowDown);
    HANDLE_CODE(TemporaryRedirect);
    HANDLE_CODE(TokenRefreshRequired);
    HANDLE_CODE(TooManyBuckets);
    HANDLE_CODE(UnexpectedContent);
    HANDLE_CODE(UnresolvableGrantByEmailAddress);
    HANDLE_CODE(UserKeyMustBeSpecified);
    HANDLE_CODE(QuotaExceeded);
    // Non-empty code that matched nothing above
    *status = S3StatusErrorUnknown;

 code_set:

    return;
}
+
+
+// Always call this
+void error_parser_deinitialize(ErrorParser *errorParser)
+{
+    if (errorParser->errorXmlParserInitialized) {
+        simplexml_deinitialize(&(errorParser->errorXmlParser));
+    }
+}

+ 490 - 0
libs/libs3/src/general.c

@@ -0,0 +1,490 @@
+/** **************************************************************************
+ * general.c
+ *
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ *
+ * This file is part of libs3.
+ *
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <ctype.h>
+#include <string.h>
+#include "request.h"
+#include "simplexml.h"
+#include "util.h"
+
+static int initializeCountG = 0;
+
+S3Status S3_initialize(const char *userAgentInfo, int flags,
+                       const char *defaultS3HostName)
+{
+    if (initializeCountG++) {
+        return S3StatusOK;
+    }
+
+    return request_api_initialize(userAgentInfo, flags, defaultS3HostName);
+}
+
+
+void S3_deinitialize()
+{
+    if (--initializeCountG) {
+        return;
+    }
+
+    request_api_deinitialize();
+}
+
// Return the symbolic name of an S3Status value (e.g. "ErrorNoSuchKey"),
// or "Unknown" for a value outside the enumeration.  The switch enumerates
// every S3Status, so compilers warning on unhandled enum values will flag
// statuses added to libs3.h but missing here.
const char *S3_get_status_name(S3Status status)
{
    switch (status) {
// Expands each status to:  case S3Status<s>: return "<s>";
#define handlecase(s)                           \
        case S3Status##s:                       \
            return #s

        handlecase(OK);
        handlecase(InternalError);
        handlecase(OutOfMemory);
        handlecase(Interrupted);
        handlecase(InvalidBucketNameTooLong);
        handlecase(InvalidBucketNameFirstCharacter);
        handlecase(InvalidBucketNameCharacter);
        handlecase(InvalidBucketNameCharacterSequence);
        handlecase(InvalidBucketNameTooShort);
        handlecase(InvalidBucketNameDotQuadNotation);
        handlecase(QueryParamsTooLong);
        handlecase(FailedToInitializeRequest);
        handlecase(MetaDataHeadersTooLong);
        handlecase(BadMetaData);
        handlecase(BadContentType);
        handlecase(ContentTypeTooLong);
        handlecase(BadMD5);
        handlecase(MD5TooLong);
        handlecase(BadCacheControl);
        handlecase(CacheControlTooLong);
        handlecase(BadContentDispositionFilename);
        handlecase(ContentDispositionFilenameTooLong);
        handlecase(BadContentEncoding);
        handlecase(ContentEncodingTooLong);
        handlecase(BadIfMatchETag);
        handlecase(IfMatchETagTooLong);
        handlecase(BadIfNotMatchETag);
        handlecase(IfNotMatchETagTooLong);
        handlecase(HeadersTooLong);
        handlecase(KeyTooLong);
        handlecase(UriTooLong);
        handlecase(XmlParseFailure);
        handlecase(EmailAddressTooLong);
        handlecase(UserIdTooLong);
        handlecase(UserDisplayNameTooLong);
        handlecase(GroupUriTooLong);
        handlecase(PermissionTooLong);
        handlecase(TargetBucketTooLong);
        handlecase(TargetPrefixTooLong);
        handlecase(TooManyGrants);
        handlecase(BadGrantee);
        handlecase(BadPermission);
        handlecase(XmlDocumentTooLarge);
        handlecase(NameLookupError);
        handlecase(FailedToConnect);
        handlecase(ServerFailedVerification);
        handlecase(ConnectionFailed);
        handlecase(AbortedByCallback);
        handlecase(NotSupported);
        handlecase(ErrorAccessDenied);
        handlecase(ErrorAccountProblem);
        handlecase(ErrorAmbiguousGrantByEmailAddress);
        handlecase(ErrorBadDigest);
        handlecase(ErrorBucketAlreadyExists);
        handlecase(ErrorBucketAlreadyOwnedByYou);
        handlecase(ErrorBucketNotEmpty);
        handlecase(ErrorCredentialsNotSupported);
        handlecase(ErrorCrossLocationLoggingProhibited);
        handlecase(ErrorEntityTooSmall);
        handlecase(ErrorEntityTooLarge);
        handlecase(ErrorExpiredToken);
        handlecase(ErrorIllegalVersioningConfigurationException);
        handlecase(ErrorIncompleteBody);
        handlecase(ErrorIncorrectNumberOfFilesInPostRequest);
        handlecase(ErrorInlineDataTooLarge);
        handlecase(ErrorInternalError);
        handlecase(ErrorInvalidAccessKeyId);
        handlecase(ErrorInvalidAddressingHeader);
        handlecase(ErrorInvalidArgument);
        handlecase(ErrorInvalidBucketName);
        handlecase(ErrorInvalidBucketState);
        handlecase(ErrorInvalidDigest);
        handlecase(ErrorInvalidEncryptionAlgorithmError);
        handlecase(ErrorInvalidLocationConstraint);
        handlecase(ErrorInvalidObjectState);
        handlecase(ErrorInvalidPart);
        handlecase(ErrorInvalidPartOrder);
        handlecase(ErrorInvalidPayer);
        handlecase(ErrorInvalidPolicyDocument);
        handlecase(ErrorInvalidRange);
        handlecase(ErrorInvalidRequest);
        handlecase(ErrorInvalidSecurity);
        handlecase(ErrorInvalidSOAPRequest);
        handlecase(ErrorInvalidStorageClass);
        handlecase(ErrorInvalidTargetBucketForLogging);
        handlecase(ErrorInvalidToken);
        handlecase(ErrorInvalidURI);
        handlecase(ErrorKeyTooLong);
        handlecase(ErrorMalformedACLError);
        handlecase(ErrorMalformedPOSTRequest);
        handlecase(ErrorMalformedXML);
        handlecase(ErrorMaxMessageLengthExceeded);
        handlecase(ErrorMaxPostPreDataLengthExceededError);
        handlecase(ErrorMetadataTooLarge);
        handlecase(ErrorMethodNotAllowed);
        handlecase(ErrorMissingAttachment);
        handlecase(ErrorMissingContentLength);
        handlecase(ErrorMissingRequestBodyError);
        handlecase(ErrorMissingSecurityElement);
        handlecase(ErrorMissingSecurityHeader);
        handlecase(ErrorNoLoggingStatusForKey);
        handlecase(ErrorNoSuchBucket);
        handlecase(ErrorNoSuchKey);
        handlecase(ErrorNoSuchLifecycleConfiguration);
        handlecase(ErrorNoSuchUpload);
        handlecase(ErrorNoSuchVersion);
        handlecase(ErrorNotImplemented);
        handlecase(ErrorNotSignedUp);
        handlecase(ErrorNoSuchBucketPolicy);
        handlecase(ErrorOperationAborted);
        handlecase(ErrorPermanentRedirect);
        handlecase(ErrorPreconditionFailed);
        handlecase(ErrorRedirect);
        handlecase(ErrorRestoreAlreadyInProgress);
        handlecase(ErrorRequestIsNotMultiPartContent);
        handlecase(ErrorRequestTimeout);
        handlecase(ErrorRequestTimeTooSkewed);
        handlecase(ErrorRequestTorrentOfBucketError);
        handlecase(ErrorSignatureDoesNotMatch);
        handlecase(ErrorServiceUnavailable);
        handlecase(ErrorSlowDown);
        handlecase(ErrorTemporaryRedirect);
        handlecase(ErrorTokenRefreshRequired);
        handlecase(ErrorTooManyBuckets);
        handlecase(ErrorUnexpectedContent);
        handlecase(ErrorUnresolvableGrantByEmailAddress);
        handlecase(ErrorUserKeyMustBeSpecified);
        handlecase(ErrorQuotaExceeded);
        handlecase(ErrorUnknown);
        handlecase(HttpErrorMovedTemporarily);
        handlecase(HttpErrorBadRequest);
        handlecase(HttpErrorForbidden);
        handlecase(HttpErrorNotFound);
        handlecase(HttpErrorConflict);
        handlecase(HttpErrorUnknown);
    }

    // Reached only for a value outside the S3Status enumeration
    return "Unknown";
}
+
+
+S3Status S3_validate_bucket_name(const char *bucketName, S3UriStyle uriStyle)
+{
+    int virtualHostStyle = (uriStyle == S3UriStyleVirtualHost);
+    int len = 0, maxlen = virtualHostStyle ? 63 : 255;
+    const char *b = bucketName;
+
+    int hasDot = 0;
+    int hasNonDigit = 0;
+
+    while (*b) {
+        if (len == maxlen) {
+            return S3StatusInvalidBucketNameTooLong;
+        }
+        else if (isalpha(*b)) {
+            len++, b++;
+            hasNonDigit = 1;
+        }
+        else if (isdigit(*b)) {
+            len++, b++;
+        }
+        else if (len == 0) {
+            return S3StatusInvalidBucketNameFirstCharacter;
+        }
+        else if (*b == '_') {
+            /* Virtual host style bucket names cannot have underscores */
+            if (virtualHostStyle) {
+                return S3StatusInvalidBucketNameCharacter;
+            }
+            len++, b++;
+            hasNonDigit = 1;
+        }
+        else if (*b == '-') {
+            /* Virtual host style bucket names cannot have .- */
+            if (virtualHostStyle && (b > bucketName) && (*(b - 1) == '.')) {
+                return S3StatusInvalidBucketNameCharacterSequence;
+            }
+            len++, b++;
+            hasNonDigit = 1;
+        }
+        else if (*b == '.') {
+            /* Virtual host style bucket names cannot have -. */
+            if (virtualHostStyle && (b > bucketName) && (*(b - 1) == '-')) {
+                return S3StatusInvalidBucketNameCharacterSequence;
+            }
+            len++, b++;
+            hasDot = 1;
+        }
+        else {
+            return S3StatusInvalidBucketNameCharacter;
+        }
+    }
+
+    if (len < 3) {
+        return S3StatusInvalidBucketNameTooShort;
+    }
+
+    /* It's not clear from Amazon's documentation exactly what 'IP address
+       style' means.  In its strictest sense, it could mean 'could be a valid
+       IP address', which would mean that 255.255.255.255 would be invalid,
+       wherase 256.256.256.256 would be valid.  Or it could mean 'has 4 sets
+       of digits separated by dots'.  Who knows.  Let's just be really
+       conservative here: if it has any dots, and no non-digit characters,
+       then we reject it */
+    if (hasDot && !hasNonDigit) {
+        return S3StatusInvalidBucketNameDotQuadNotation;
+    }
+
+    return S3StatusOK;
+}
+
+
// Accumulator used by S3_convert_acl while parsing an ACL XML document.
typedef struct ConvertAclData
{
    char *ownerId;              // caller-supplied buffer for the owner ID
    int ownerIdLen;             // bytes of ownerId filled so far
    char *ownerDisplayName;     // caller-supplied buffer for the display name
    int ownerDisplayNameLen;    // bytes of ownerDisplayName filled so far
    int *aclGrantCountReturn;   // out: number of grants written to aclGrants
    S3AclGrant *aclGrants;      // caller-supplied grant array to fill in

    // Scratch buffers for the <Grant> currently being parsed; reset after
    // each completed Grant element
    string_buffer(emailAddress, S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE);
    string_buffer(userId, S3_MAX_GRANTEE_USER_ID_SIZE);
    string_buffer(userDisplayName, S3_MAX_GRANTEE_DISPLAY_NAME_SIZE);
    string_buffer(groupUri, 128);
    string_buffer(permission, 32);
} ConvertAclData;
+
+
// simplexml callback for AccessControlPolicy documents.  Text content
// (data != 0) accumulates into ConvertAclData's scratch buffers; when a
// Grant element closes (data == 0), the buffered values are converted into
// the next S3AclGrant entry and the scratch buffers are reset.
static S3Status convertAclXmlCallback(const char *elementPath,
                                      const char *data, int dataLen,
                                      void *callbackData)
{
    ConvertAclData *caData = (ConvertAclData *) callbackData;

    int fit;

    if (data) {
        if (!strcmp(elementPath, "AccessControlPolicy/Owner/ID")) {
            // Append directly into the caller's buffer; the parser may
            // deliver one element's text in multiple chunks.  snprintf
            // returns the untruncated length, so the check below detects
            // overflow.
            caData->ownerIdLen +=
                snprintf(&(caData->ownerId[caData->ownerIdLen]),
                         S3_MAX_GRANTEE_USER_ID_SIZE - caData->ownerIdLen - 1,
                         "%.*s", dataLen, data);
            if (caData->ownerIdLen >= S3_MAX_GRANTEE_USER_ID_SIZE) {
                return S3StatusUserIdTooLong;
            }
        }
        else if (!strcmp(elementPath, "AccessControlPolicy/Owner/"
                         "DisplayName")) {
            caData->ownerDisplayNameLen +=
                snprintf(&(caData->ownerDisplayName
                           [caData->ownerDisplayNameLen]),
                         S3_MAX_GRANTEE_DISPLAY_NAME_SIZE -
                         caData->ownerDisplayNameLen - 1,
                         "%.*s", dataLen, data);
            if (caData->ownerDisplayNameLen >=
                S3_MAX_GRANTEE_DISPLAY_NAME_SIZE) {
                return S3StatusUserDisplayNameTooLong;
            }
        }
        else if (!strcmp(elementPath,
                    "AccessControlPolicy/AccessControlList/Grant/"
                    "Grantee/EmailAddress")) {
            // AmazonCustomerByEmail
            string_buffer_append(caData->emailAddress, data, dataLen, fit);
            if (!fit) {
                return S3StatusEmailAddressTooLong;
            }
        }
        else if (!strcmp(elementPath,
                         "AccessControlPolicy/AccessControlList/Grant/"
                         "Grantee/ID")) {
            // CanonicalUser
            string_buffer_append(caData->userId, data, dataLen, fit);
            if (!fit) {
                return S3StatusUserIdTooLong;
            }
        }
        else if (!strcmp(elementPath,
                         "AccessControlPolicy/AccessControlList/Grant/"
                         "Grantee/DisplayName")) {
            // CanonicalUser
            string_buffer_append(caData->userDisplayName, data, dataLen, fit);
            if (!fit) {
                return S3StatusUserDisplayNameTooLong;
            }
        }
        else if (!strcmp(elementPath,
                         "AccessControlPolicy/AccessControlList/Grant/"
                         "Grantee/URI")) {
            // Group
            string_buffer_append(caData->groupUri, data, dataLen, fit);
            if (!fit) {
                return S3StatusGroupUriTooLong;
            }
        }
        else if (!strcmp(elementPath,
                         "AccessControlPolicy/AccessControlList/Grant/"
                         "Permission")) {
            // Permission
            string_buffer_append(caData->permission, data, dataLen, fit);
            if (!fit) {
                return S3StatusPermissionTooLong;
            }
        }
    }
    else {
        if (!strcmp(elementPath, "AccessControlPolicy/AccessControlList/"
                    "Grant")) {
            // A grant has just been completed; so add the next S3AclGrant
            // based on the values read
            if (*(caData->aclGrantCountReturn) == S3_MAX_ACL_GRANT_COUNT) {
                return S3StatusTooManyGrants;
            }

            S3AclGrant *grant = &(caData->aclGrants
                                  [*(caData->aclGrantCountReturn)]);

            // Decide the grantee type from whichever scratch buffer was
            // populated: email > canonical user > group URI
            if (caData->emailAddress[0]) {
                grant->granteeType = S3GranteeTypeAmazonCustomerByEmail;
                strcpy(grant->grantee.amazonCustomerByEmail.emailAddress,
                       caData->emailAddress);
            }
            else if (caData->userId[0] && caData->userDisplayName[0]) {
                grant->granteeType = S3GranteeTypeCanonicalUser;
                strcpy(grant->grantee.canonicalUser.id, caData->userId);
                strcpy(grant->grantee.canonicalUser.displayName,
                       caData->userDisplayName);
            }
            else if (caData->groupUri[0]) {
                if (!strcmp(caData->groupUri,
                            ACS_GROUP_AWS_USERS)) {
                    grant->granteeType = S3GranteeTypeAllAwsUsers;
                }
                else if (!strcmp(caData->groupUri,
                            ACS_GROUP_ALL_USERS)) {
                    grant->granteeType = S3GranteeTypeAllUsers;
                }
                else if (!strcmp(caData->groupUri,
                                 ACS_GROUP_LOG_DELIVERY)) {
                    grant->granteeType = S3GranteeTypeLogDelivery;
                }
                else {
                    return S3StatusBadGrantee;
                }
            }
            else {
                return S3StatusBadGrantee;
            }

            if (!strcmp(caData->permission, "READ")) {
                grant->permission = S3PermissionRead;
            }
            else if (!strcmp(caData->permission, "WRITE")) {
                grant->permission = S3PermissionWrite;
            }
            else if (!strcmp(caData->permission, "READ_ACP")) {
                grant->permission = S3PermissionReadACP;
            }
            else if (!strcmp(caData->permission, "WRITE_ACP")) {
                grant->permission = S3PermissionWriteACP;
            }
            else if (!strcmp(caData->permission, "FULL_CONTROL")) {
                grant->permission = S3PermissionFullControl;
            }
            else {
                return S3StatusBadPermission;
            }

            (*(caData->aclGrantCountReturn))++;

            // Reset scratch buffers for the next Grant element
            string_buffer_initialize(caData->emailAddress);
            string_buffer_initialize(caData->userId);
            string_buffer_initialize(caData->userDisplayName);
            string_buffer_initialize(caData->groupUri);
            string_buffer_initialize(caData->permission);
        }
    }

    return S3StatusOK;
}
+
+
+S3Status S3_convert_acl(char *aclXml, char *ownerId, char *ownerDisplayName,
+                        int *aclGrantCountReturn, S3AclGrant *aclGrants)
+{
+    ConvertAclData data;
+
+    data.ownerId = ownerId;
+    data.ownerIdLen = 0;
+    data.ownerId[0] = 0;
+    data.ownerDisplayName = ownerDisplayName;
+    data.ownerDisplayNameLen = 0;
+    data.ownerDisplayName[0] = 0;
+    data.aclGrantCountReturn = aclGrantCountReturn;
+    data.aclGrants = aclGrants;
+    *aclGrantCountReturn = 0;
+    string_buffer_initialize(data.emailAddress);
+    string_buffer_initialize(data.userId);
+    string_buffer_initialize(data.userDisplayName);
+    string_buffer_initialize(data.groupUri);
+    string_buffer_initialize(data.permission);
+
+    // Use a simplexml parser
+    SimpleXml simpleXml;
+    simplexml_initialize(&simpleXml, &convertAclXmlCallback, &data);
+
+    S3Status status = simplexml_add(&simpleXml, aclXml, strlen(aclXml));
+
+    simplexml_deinitialize(&simpleXml);
+
+    return status;
+}
+
+
+int S3_status_is_retryable(S3Status status)
+{
+    switch (status) {
+    case S3StatusNameLookupError:
+    case S3StatusFailedToConnect:
+    case S3StatusConnectionFailed:
+    case S3StatusErrorInternalError:
+    case S3StatusErrorOperationAborted:
+    case S3StatusErrorRequestTimeout:
+        return 1;
+    default:
+        return 0;
+    }
+}

+ 119 - 0
libs/libs3/src/mingw_functions.c

@@ -0,0 +1,119 @@
+/** **************************************************************************
+ * mingw_functions.c
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <pthread.h>
+#include <sys/utsname.h>
+
+// MinGW shim: map pthread_self() onto the Win32 thread id so callers
+// get a unique per-thread value (not a real pthread_t handle).
+unsigned long pthread_self()
+{
+    return (unsigned long) GetCurrentThreadId();
+}
+
+
+// MinGW shim: back the pthread mutex with a Win32 CRITICAL_SECTION.
+// The attributes argument is ignored; always reports success.
+int pthread_mutex_init(pthread_mutex_t *mutex, void *v)
+{
+    (void) v;
+
+    InitializeCriticalSection(&(mutex->criticalSection));
+
+    return 0;
+}
+
+
+// MinGW shim: acquire the underlying critical section; always returns 0.
+int pthread_mutex_lock(pthread_mutex_t *mutex)
+{
+    EnterCriticalSection(&(mutex->criticalSection));
+
+    return 0;
+}
+
+
+// MinGW shim: release the underlying critical section; always returns 0.
+int pthread_mutex_unlock(pthread_mutex_t *mutex)
+{
+    LeaveCriticalSection(&(mutex->criticalSection));
+
+    return 0;
+}
+
+
+// MinGW shim: free the critical section's resources; always returns 0.
+// The mutex must not be held (by any thread) when this is called.
+int pthread_mutex_destroy(pthread_mutex_t *mutex)
+{
+    DeleteCriticalSection(&(mutex->criticalSection));
+
+    return 0;
+}
+
+
+// MinGW shim: fill u->sysname with a human-readable Windows version
+// string derived from GetVersionEx().  u->machine is always "".
+// NOTE(review): the version matrix stops at 5.2 (Server 2003), so this
+// returns -1 on Vista (6.0) and anything newer — confirm whether that
+// is acceptable for the callers that use this string.
+int uname(struct utsname *u)
+{
+    OSVERSIONINFO info;
+    info.dwOSVersionInfoSize = sizeof(info);
+
+    if (!GetVersionEx(&info)) {
+        return -1;
+    }
+
+    u->machine = "";
+
+    switch (info.dwMajorVersion) {
+    case 4:
+        switch (info.dwMinorVersion) {
+        case 0:
+            u->sysname = "Microsoft Windows NT 4.0";
+            break;
+        case 10:
+            u->sysname = "Microsoft Windows 98";
+            break;
+        case 90:
+            u->sysname = "Microsoft Windows Me";
+            break;
+        default:
+            return -1;
+        }
+        break;
+
+    case 5:
+        switch (info.dwMinorVersion) {
+        case 0:
+            u->sysname = "Microsoft Windows 2000";
+            break;
+        case 1:
+            u->sysname = "Microsoft Windows XP";
+            break;
+        case 2:
+            u->sysname = "Microsoft Server 2003";
+            break;
+        default:
+            return -1;
+        }
+        break;
+
+    default:
+        return -1;
+    }
+
+    return 0;
+}

+ 37 - 0
libs/libs3/src/mingw_s3_functions.c

@@ -0,0 +1,37 @@
+/** **************************************************************************
+ * mingw_s3_functions.c
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+// MinGW shim for POSIX setenv().  The overwrite flag is not honored:
+// the value is always replaced.  TODO(review): honor c == 0 if any
+// caller depends on it.
+int setenv(const char *a, const char *b, int c)
+{
+    (void) c;
+
+    // SetEnvironmentVariable() returns nonzero on success and zero on
+    // failure -- the opposite of POSIX setenv()'s 0-on-success /
+    // -1-on-error convention.  Translate the result so callers that
+    // test it the POSIX way see the right answer.
+    return SetEnvironmentVariable(a, b) ? 0 : -1;
+}
+
+// MinGW shim for POSIX unsetenv(): passing a NULL value removes the
+// variable.  Translate SetEnvironmentVariable()'s nonzero-on-success
+// BOOL into the POSIX 0-on-success / -1-on-error convention (the
+// previous code returned the raw BOOL, inverting the meaning).
+int unsetenv(const char *a)
+{
+    return SetEnvironmentVariable(a, 0) ? 0 : -1;
+}

+ 1093 - 0
libs/libs3/src/multipart.c

@@ -0,0 +1,1093 @@
+/** **************************************************************************
+ * multipart.c
+ *
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ *
+ * This file is part of libs3.
+ *
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <string.h>
+#include <stdlib.h>
+#include "libs3.h"
+#include "request.h"
+#include "simplexml.h"
+
+
+// Per-request state for an InitiateMultipartUpload call: the XML parser
+// used on the response body, the caller's handler, the UploadId parsed
+// from the response, and the caller's opaque callback data.
+typedef struct InitialMultipartData
+{
+    SimpleXml simpleXml;
+    int len;
+    S3MultipartInitialHandler *handler;
+    string_buffer(upload_id, 256);  // UploadId from the response XML
+    void *userdata;
+} InitialMultipartData;
+
+// fromS3Callback: feed each chunk of the response body into the XML
+// parser, which fills in mdata->upload_id.
+static S3Status InitialMultipartCallback(int bufferSize, const char *buffer,
+                                         void *callbackData)
+{
+    InitialMultipartData *mdata = (InitialMultipartData *) callbackData;
+    return simplexml_add(&(mdata->simpleXml), buffer, bufferSize);
+}
+
+// completeCallback: forward completion to the caller's handler, hand the
+// parsed UploadId to the XML callback, then release the request state.
+static void InitialMultipartCompleteCallback
+    (S3Status requestStatus, const S3ErrorDetails *s3ErrorDetails,
+     void *callbackData)
+{
+    InitialMultipartData *mdata = (InitialMultipartData *) callbackData;
+
+    if (mdata->handler->responseHandler.completeCallback) {
+        (*mdata->handler->responseHandler.completeCallback)
+            (requestStatus, s3ErrorDetails, mdata->userdata);
+    }
+
+    // Invoked even on failure; upload_id is then the empty string.
+    if (mdata->handler->responseXmlCallback) {
+        (*mdata->handler->responseXmlCallback)
+            (mdata->upload_id, mdata->userdata);
+    }
+
+    simplexml_deinitialize(&(mdata->simpleXml));
+    free(mdata);
+}
+
+// completeCallback for S3_abort_multipart_upload: prints the status name
+// to stderr.  NOTE(review): this prints unconditionally, i.e. even when
+// requestStatus is S3StatusOK — confirm whether that is intended.
+static void AbortMultipartUploadCompleteCallback
+    (S3Status requestStatus, const S3ErrorDetails *s3ErrorDetails,
+     void *callbackData)
+{
+    (void) callbackData;
+    (void) s3ErrorDetails;
+    fprintf(stderr, "\nERROR: %s\n", S3_get_status_name(requestStatus));
+
+}
+
+// simplexml element callback: accumulate the text of
+// InitiateMultipartUploadResult/UploadId into mdata->upload_id.
+// Overflow (fit == 0) is deliberately ignored; the id is truncated.
+static S3Status initialMultipartXmlCallback(const char *elementPath,
+                                            const char *data,
+                                            int dataLen,
+                                            void *callbackData)
+{
+    InitialMultipartData *mdata = (InitialMultipartData *) callbackData;
+    int fit;
+    if (data) {
+        if (!strcmp(elementPath, "InitiateMultipartUploadResult/UploadId")) {
+            string_buffer_append(mdata->upload_id,data, dataLen, fit);
+        }
+    }
+
+    (void) fit;
+    return S3StatusOK;
+}
+
+// Start a multipart upload (POST ?uploads).  The parsed UploadId is
+// delivered through handler->responseXmlCallback when the request
+// completes.  TODO(review): the malloc result is not checked — a NULL
+// return would crash in simplexml_initialize.
+void S3_initiate_multipart(S3BucketContext *bucketContext, const char *key,
+                          S3PutProperties *putProperties,
+                          S3MultipartInitialHandler *handler,
+                          S3RequestContext *requestContext,
+                          int timeoutMs,
+                          void *callbackData)
+{
+    InitialMultipartData *mdata =
+        (InitialMultipartData *) malloc(sizeof(InitialMultipartData));
+    simplexml_initialize(&(mdata->simpleXml), &initialMultipartXmlCallback,
+                         mdata);
+    string_buffer_initialize(mdata->upload_id);
+    mdata->handler= handler;
+    mdata->userdata = callbackData;
+
+    RequestParams params =
+    {
+        HttpRequestTypePOST,                          // httpRequestType
+        { bucketContext->hostName,                    // hostName
+          bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        key,                                          // key
+        0,                                            // queryParams
+        "uploads",                                    // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        putProperties,                                // putProperties
+        handler->responseHandler.propertiesCallback,  // propertiesCallback
+        0,                                            // toS3Callback
+        0,                                            // toS3CallbackTotalSize
+        InitialMultipartCallback,                     // fromS3Callback
+        InitialMultipartCompleteCallback,             // completeCallback
+        mdata,                                        // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+}
+
+
+// Abort a multipart upload (DELETE ?uploadId=...).  Always performed
+// synchronously (no request context is passed to request_perform).
+// NOTE(review): subResource is a stack buffer that request_perform must
+// consume before returning — true only for the synchronous path.
+void S3_abort_multipart_upload(S3BucketContext *bucketContext, const char *key,
+                               const char *uploadId,
+                               int timeoutMs,
+                               S3AbortMultipartUploadHandler *handler)
+{
+    char subResource[512];
+    snprintf(subResource, 512, "uploadId=%s", uploadId);
+
+    RequestParams params =
+    {
+        HttpRequestTypeDELETE,                        // httpRequestType
+        { bucketContext->hostName,                    // hostName
+     	  bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        key,                                          // key
+        0,                                            // queryParams
+        subResource,                                  // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        0,                                            // putProperties
+        handler->responseHandler.propertiesCallback,  // propertiesCallback
+        0,                                            // toS3Callback
+        0,                                            // toS3CallbackTotalSize
+        0,                                            // fromS3Callback
+        AbortMultipartUploadCompleteCallback,         // completeCallback
+        0,                                            // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, 0);
+}
+
+
+/*
+ * S3 Upload Part
+ */
+
+// Upload one part (PUT ?partNumber=N&uploadId=...).  The part body is
+// pulled through handler->putObjectDataCallback; partContentLength is
+// the exact number of bytes to send.
+// NOTE(review): queryParams is a stack buffer — safe only if
+// request_perform copies it before returning on the async path; confirm.
+void S3_upload_part(S3BucketContext *bucketContext, const char *key,
+                    S3PutProperties *putProperties,
+                    S3PutObjectHandler *handler, int seq,
+                    const char *upload_id, int partContentLength,
+                    S3RequestContext *requestContext,
+                    int timeoutMs,
+                    void *callbackData)
+{
+    char queryParams[512];
+    snprintf(queryParams, 512, "partNumber=%d&uploadId=%s", seq, upload_id);
+
+    RequestParams params =
+    {
+        HttpRequestTypePUT,                           // httpRequestType
+        { bucketContext->hostName,                    // hostName
+          bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        key,                                          // key
+        queryParams,                                  // queryParams
+        0,                                            // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        putProperties,                                // putProperties
+        handler->responseHandler.propertiesCallback,  // propertiesCallback
+        handler->putObjectDataCallback,               // toS3Callback
+        partContentLength,                            // toS3CallbackTotalSize
+        0,                                            // fromS3Callback
+        handler->responseHandler.completeCallback,    // completeCallback
+        callbackData,                                 // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    request_perform(&params, requestContext);
+}
+
+
+/*
+ * S3 commit multipart
+ *
+ */
+
+// Per-request state for a CompleteMultipartUpload call: the response XML
+// parser, the caller's handler/userdata, and the Location and ETag
+// fields parsed out of the CompleteMultipartUploadResult document.
+typedef struct CommitMultiPartData {
+    SimpleXml simplexml;
+    void *userdata;
+    S3MultipartCommitHandler *handler;
+    //response parsed from
+    string_buffer(location,128);
+    string_buffer(etag,128);
+} CommitMultiPartData;
+
+
+// simplexml element callback: collect Location and ETag text from the
+// CompleteMultipartUploadResult document.  Truncation (fit == 0) is
+// deliberately ignored.
+static S3Status commitMultipartResponseXMLcallback(const char *elementPath,
+                                                   const char *data,
+                                                   int dataLen,
+                                                   void *callbackData)
+{
+    int fit;
+    CommitMultiPartData *commit_data = (CommitMultiPartData *) callbackData;
+    if (data) {
+        if (!strcmp(elementPath, "CompleteMultipartUploadResult/Location")) {
+            string_buffer_append(commit_data->location, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "CompleteMultipartUploadResult/ETag")) {
+            string_buffer_append(commit_data->etag, data, dataLen, fit);
+        }
+    }
+    (void) fit;
+
+    return S3StatusOK;
+}
+
+
+// fromS3Callback: stream each chunk of the response body into the XML
+// parser above.
+static S3Status commitMultipartCallback(int bufferSize, const char *buffer,
+                                        void *callbackData)
+{
+    CommitMultiPartData *data = (CommitMultiPartData *) callbackData;
+    return simplexml_add(&(data->simplexml), buffer, bufferSize);
+}
+
+
+// propertiesCallback: forward response headers to the caller's handler,
+// if one was provided; always reports success itself.
+static S3Status commitMultipartPropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    CommitMultiPartData *data = (CommitMultiPartData *) callbackData;
+
+    if (data->handler->responseHandler.propertiesCallback) {
+        (*(data->handler->responseHandler.propertiesCallback))
+            (responseProperties, data->userdata);
+    }
+    return S3StatusOK;
+}
+
+// completeCallback: forward completion to the caller, deliver the parsed
+// Location/ETag (empty strings on failure), then free the state.
+static void commitMultipartCompleteCallback
+    (S3Status requestStatus, const S3ErrorDetails *s3ErrorDetails,
+     void *callbackData)
+{
+    CommitMultiPartData *data = (CommitMultiPartData*) callbackData;
+    if (data->handler->responseHandler.completeCallback) {
+        (*(data->handler->responseHandler.completeCallback))
+            (requestStatus, s3ErrorDetails, data->userdata);
+    }
+    if (data->handler->responseXmlCallback) {
+        (*data->handler->responseXmlCallback)(data->location, data->etag,
+                                              data->userdata);
+    }
+    simplexml_deinitialize(&(data->simplexml));
+    free(data);
+}
+
+
+// toS3Callback: delegate request-body production (the part-list XML) to
+// the caller's putObjectDataCallback; -1 signals an error to the
+// request machinery when no callback was supplied.
+static int commitMultipartPutObject(int bufferSize, char *buffer,
+                                    void *callbackData)
+{
+    CommitMultiPartData *data = (CommitMultiPartData*) callbackData;
+    if (data->handler->putObjectDataCallback) {
+        return data->handler->putObjectDataCallback(bufferSize, buffer,
+                                                    data->userdata);
+    }
+    else {
+        return -1;
+    }
+}
+
+// Complete a multipart upload (POST ?uploadId=...).  The part-list XML
+// body (contentLength bytes) is supplied by handler->putObjectDataCallback;
+// the parsed Location/ETag are delivered via handler->responseXmlCallback.
+// TODO(review): the malloc result is not checked before use.
+void S3_complete_multipart_upload(S3BucketContext *bucketContext,
+                                  const char *key,
+                                  S3MultipartCommitHandler *handler,
+                                  const char *upload_id, int contentLength,
+                                  S3RequestContext *requestContext,
+                                  int timeoutMs,
+                                  void *callbackData)
+{
+    char queryParams[512];
+    snprintf(queryParams, 512, "uploadId=%s", upload_id);
+    CommitMultiPartData *data =
+        (CommitMultiPartData *) malloc(sizeof(CommitMultiPartData));
+    data->userdata = callbackData;
+    data->handler = handler;
+    string_buffer_initialize(data->location);
+    string_buffer_initialize(data->etag);
+
+    simplexml_initialize(&(data->simplexml),
+                         commitMultipartResponseXMLcallback, data);
+
+    RequestParams params =
+    {
+        HttpRequestTypePOST,                          // httpRequestType
+        { bucketContext->hostName,                    // hostName
+          bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        key,                                          // key
+        queryParams,                                  // queryParams
+        0,                                            // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        0,                                            // putProperties
+        commitMultipartPropertiesCallback,            // propertiesCallback
+        commitMultipartPutObject,                     // toS3Callback
+        contentLength,                                // toS3CallbackTotalSize
+        commitMultipartCallback,                      // fromS3Callback
+        commitMultipartCompleteCallback,              // completeCallback
+        data,                                         // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    request_perform(&params, requestContext);
+}
+
+// We read up to 32 Uploads at a time
+#define MAX_UPLOADS 32
+// We read up to 8 CommonPrefixes at a time
+#define MAX_COMMON_PREFIXES 8
+// We read up to 32 Parts at a time
+#define MAX_PARTS 32
+
+
+// One <Upload> entry from a ListMultipartUploads response, kept as raw
+// strings; conversion to S3ListMultipartUpload happens in
+// make_list_multipart_callback.
+typedef struct ListMultipartUpload
+{
+    string_buffer(key, 1024);
+    string_buffer(uploadId, 256);
+    string_buffer(initiatorId, 256);
+    string_buffer(initiatorDisplayName, 256);
+    string_buffer(ownerId, 256);
+    string_buffer(ownerDisplayName, 256);
+    string_buffer(storageClass, 256);
+    string_buffer(initiated, 256);  // ISO 8601 timestamp, parsed later
+} ListMultipartUpload;
+
+
+// One <Part> entry from a ListParts response, kept as raw strings;
+// numeric/time conversion happens in make_list_parts_callback.
+typedef struct ListPart
+{
+    string_buffer(eTag, 1024);
+    string_buffer(partNumber, 24);
+    string_buffer(size, 256);
+    string_buffer(lastModified, 256);  // ISO 8601 timestamp, parsed later
+} ListPart;
+
+
+// Full parse state for a ListMultipartUploads request: caller callbacks,
+// pagination markers, and batched uploads/common-prefixes that are
+// flushed to the caller in groups of MAX_UPLOADS / MAX_COMMON_PREFIXES.
+typedef struct ListMultipartData
+{
+    SimpleXml simpleXml;
+
+    S3ResponsePropertiesCallback *responsePropertiesCallback;
+    S3ListMultipartUploadsResponseCallback *listMultipartCallback;
+    S3ResponseCompleteCallback *responseCompleteCallback;
+    void *callbackData;
+
+    string_buffer(isTruncated, 64);
+    string_buffer(nextKeyMarker, 1024);
+    string_buffer(nextUploadIdMarker, 1024);
+
+    int uploadsCount;
+    ListMultipartUpload uploads[MAX_UPLOADS];
+
+    int commonPrefixesCount;
+    char commonPrefixes[MAX_COMMON_PREFIXES][1024];
+    int commonPrefixLens[MAX_COMMON_PREFIXES];
+} ListMultipartData;
+
+
+// Full parse state for a ListParts request: caller callbacks, pagination
+// marker, initiator/owner metadata, and a batch of up to MAX_PARTS parts
+// flushed to the caller at a time.
+typedef struct ListPartsData
+{
+    SimpleXml simpleXml;
+
+    S3ResponsePropertiesCallback *responsePropertiesCallback;
+    S3ListPartsResponseCallback *listPartsCallback;
+    S3ResponseCompleteCallback *responseCompleteCallback;
+    void *callbackData;
+
+    string_buffer(isTruncated, 64);
+    string_buffer(nextPartNumberMarker, 1024);
+    string_buffer(initiatorId, 256);
+    string_buffer(initiatorDisplayName, 256);
+    string_buffer(ownerId, 256);
+    string_buffer(ownerDisplayName, 256);
+    string_buffer(storageClass, 256);
+
+    int handlePartsStart;
+    int partsCount;
+    ListPart parts[MAX_PARTS];
+
+} ListPartsData;
+
+
+// Reset every string field of one upload entry to the empty string.
+static void initialize_list_multipart_upload(ListMultipartUpload *upload)
+{
+    string_buffer_initialize(upload->key);
+    string_buffer_initialize(upload->uploadId);
+    string_buffer_initialize(upload->initiatorId);
+    string_buffer_initialize(upload->initiatorDisplayName);
+    string_buffer_initialize(upload->ownerId);
+    string_buffer_initialize(upload->ownerDisplayName);
+    string_buffer_initialize(upload->storageClass);
+    string_buffer_initialize(upload->initiated);
+}
+
+
+// Reset every string field of one part entry to the empty string.
+static void initialize_list_part(ListPart *part)
+{
+    string_buffer_initialize(part->eTag);
+    string_buffer_initialize(part->partNumber);
+    string_buffer_initialize(part->size);
+    string_buffer_initialize(part->lastModified);
+}
+
+// Reset the batch counters after flushing a batch to the caller.  Only
+// slot 0 of each array is cleared; later slots are re-initialized one by
+// one as the XML parser advances to them.
+static void initialize_list_multipart_data(ListMultipartData *lmData)
+{
+    lmData->uploadsCount = 0;
+    initialize_list_multipart_upload(lmData->uploads);
+    lmData->commonPrefixesCount = 0;
+    lmData->commonPrefixes[0][0] = 0;
+    lmData->commonPrefixLens[0] = 0;
+}
+
+// Reset the parts batch after a flush; only slot 0 is cleared (later
+// slots are initialized as the parser reaches them).
+static void initialize_list_parts_data(ListPartsData *lpData)
+{
+    lpData->partsCount = 0;
+    initialize_list_part(lpData->parts);
+}
+
+
+// propertiesCallback: pass response headers straight through to the
+// caller's callback (assumed non-NULL).
+static S3Status listMultipartPropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    ListMultipartData *lmData = (ListMultipartData *) callbackData;
+
+    return (*(lmData->responsePropertiesCallback))
+        (responseProperties, lmData->callbackData);
+}
+
+
+// propertiesCallback: pass response headers straight through to the
+// caller's callback (assumed non-NULL).
+static S3Status listPartsPropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    ListPartsData *lpData = (ListPartsData *) callbackData;
+
+    return (*(lpData->responsePropertiesCallback))
+        (responseProperties, lpData->callbackData);
+}
+
+
+// fromS3Callback: stream each response-body chunk into the XML parser.
+static S3Status listMultipartDataCallback(int bufferSize, const char *buffer,
+                                       void *callbackData)
+{
+    ListMultipartData *lmData = (ListMultipartData *) callbackData;
+
+    return simplexml_add(&(lmData->simpleXml), buffer, bufferSize);
+}
+
+
+// fromS3Callback: stream each response-body chunk into the XML parser.
+static S3Status listPartsDataCallback(int bufferSize, const char *buffer,
+                                       void *callbackData)
+{
+    ListPartsData *lpData = (ListPartsData *) callbackData;
+
+    return simplexml_add(&(lpData->simpleXml), buffer, bufferSize);
+}
+
+
+static S3Status make_list_multipart_callback(ListMultipartData *lmData)
+{
+    int i;
+
+    // Convert IsTruncated
+    int isTruncated = (!strcmp(lmData->isTruncated, "true") ||
+                       !strcmp(lmData->isTruncated, "1")) ? 1 : 0;
+
+    // Convert the contents
+    S3ListMultipartUpload uploads[lmData->uploadsCount];
+
+    int uploadsCount = lmData->uploadsCount;
+    for (i = 0; i < uploadsCount; i++) {
+        S3ListMultipartUpload *uploadDest = &(uploads[i]);
+        ListMultipartUpload *uploadSrc = &(lmData->uploads[i]);
+        uploadDest->key = uploadSrc->key;
+        uploadDest->uploadId = uploadSrc->uploadId;
+        uploadDest->initiatorId = uploadSrc->initiatorId;
+        uploadDest->initiatorDisplayName = uploadSrc->initiatorDisplayName;
+        uploadDest->ownerId =
+            uploadSrc->ownerId[0] ?uploadSrc->ownerId : 0;
+        uploadDest->ownerDisplayName = (uploadSrc->ownerDisplayName[0] ?
+                                        uploadSrc->ownerDisplayName : 0);
+        uploadDest->storageClass = uploadSrc->storageClass;
+        uploadDest->initiated = parseIso8601Time(uploadSrc->initiated);
+    }
+
+    // Make the common prefixes array
+    int commonPrefixesCount = lmData->commonPrefixesCount;
+    char *commonPrefixes[commonPrefixesCount];
+    for (i = 0; i < commonPrefixesCount; i++) {
+        commonPrefixes[i] = lmData->commonPrefixes[i];
+    }
+
+    return (*(lmData->listMultipartCallback))
+        (isTruncated, lmData->nextKeyMarker, lmData->nextUploadIdMarker,
+         uploadsCount, uploads, commonPrefixesCount,
+         (const char **) commonPrefixes, lmData->callbackData);
+}
+
+
+// Convert the accumulated batch of raw part strings into the public
+// S3ListPart form and invoke the caller's callback.
+// NOTE(review): the Parts VLA has length partsCount — undefined behavior
+// if 0; callers appear to only invoke this with partsCount > 0; confirm.
+static S3Status make_list_parts_callback(ListPartsData *lpData)
+{
+    int i;
+
+    // Convert IsTruncated
+    int isTruncated = (!strcmp(lpData->isTruncated, "true") ||
+                       !strcmp(lpData->isTruncated, "1")) ? 1 : 0;
+
+    // Convert the contents
+    S3ListPart Parts[lpData->partsCount];
+    int partsCount = lpData->partsCount;
+    for (i = 0; i < partsCount; i++) {
+        S3ListPart *partDest = &(Parts[i]);
+        ListPart *partSrc = &(lpData->parts[i]);
+        partDest->eTag = partSrc->eTag;
+        partDest->partNumber = parseUnsignedInt(partSrc->partNumber);
+        partDest->size = parseUnsignedInt(partSrc->size);
+        partDest->lastModified = parseIso8601Time(partSrc->lastModified);
+    }
+
+    return (*(lpData->listPartsCallback))
+        (isTruncated, lpData->nextPartNumberMarker, lpData->initiatorId,
+         lpData->initiatorDisplayName, lpData->ownerId,
+         lpData->ownerDisplayName, lpData->storageClass, partsCount,
+         lpData->handlePartsStart, Parts, lpData->callbackData);
+}
+
+
+// completeCallback: flush any final partial batch to the caller, forward
+// the completion status, then free all request state.
+static void listMultipartCompleteCallback(S3Status requestStatus,
+                                          const S3ErrorDetails *s3ErrorDetails,
+                                          void *callbackData)
+{
+    ListMultipartData *lmData = (ListMultipartData *) callbackData;
+
+    // Make the callback if there is anything
+    if (lmData->uploadsCount || lmData->commonPrefixesCount) {
+        make_list_multipart_callback(lmData);
+    }
+
+    (*(lmData->responseCompleteCallback))
+        (requestStatus, s3ErrorDetails, lmData->callbackData);
+
+    simplexml_deinitialize(&(lmData->simpleXml));
+
+    free(lmData);
+}
+
+
+// completeCallback: flush any final partial batch to the caller, forward
+// the completion status, then free all request state.
+static void listPartsCompleteCallback(S3Status requestStatus,
+                                      const S3ErrorDetails *s3ErrorDetails,
+                                      void *callbackData)
+{
+    ListPartsData *lpData = (ListPartsData *) callbackData;
+
+    // Make the callback if there is anything
+    if (lpData->partsCount) {
+        make_list_parts_callback(lpData);
+    }
+
+    (*(lpData->responseCompleteCallback))
+        (requestStatus, s3ErrorDetails, lpData->callbackData);
+
+    simplexml_deinitialize(&(lpData->simpleXml));
+
+    free(lpData);
+}
+
+
+static S3Status listMultipartXmlCallback(const char *elementPath,
+                                         const char *data, int dataLen,
+                                         void *callbackData)
+{
+    ListMultipartData *lmData = (ListMultipartData *) callbackData;
+
+    int fit;
+
+    if (data) {
+        if (!strcmp(elementPath, "ListMultipartUploadsResult/IsTruncated")) {
+            string_buffer_append(lmData->isTruncated, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListMultipartUploadsResult/NextKeyMarker")) {
+            string_buffer_append(lmData->nextKeyMarker, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListMultipartUploadsResult/NextUploadIdMarker")) {
+            string_buffer_append(lmData->nextUploadIdMarker, data, dataLen,
+                                 fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListMultipartUploadsResult/Upload/Key")) {
+            ListMultipartUpload *uploads =
+                &(lmData->uploads[lmData->uploadsCount]);
+            string_buffer_append(uploads->key, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListMultipartUploadsResult/Upload/Initiated")) {
+            ListMultipartUpload *uploads =
+                &(lmData->uploads[lmData->uploadsCount]);
+            string_buffer_append(uploads->initiated, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListMultipartUploadsResult/Upload/UploadId")) {
+            ListMultipartUpload *uploads =
+                &(lmData->uploads[lmData->uploadsCount]);
+            string_buffer_append(uploads->uploadId, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListMultipartUploadsResult/Upload/Initiator/ID")) {
+            ListMultipartUpload *uploads =
+                &(lmData->uploads[lmData->uploadsCount]);
+            string_buffer_append(uploads->initiatorId, data, dataLen, fit);
+        }
+        else if (!strcmp
+                 (elementPath,
+                  "ListMultipartUploadsResult/Upload/Initiator/DisplayName")) {
+            ListMultipartUpload *uploads =
+                &(lmData->uploads[lmData->uploadsCount]);
+            string_buffer_append(uploads->initiatorDisplayName, data, dataLen,
+                                 fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListMultipartUploadsResult/Upload/Owner/ID")) {
+            ListMultipartUpload *uploads =
+                &(lmData->uploads[lmData->uploadsCount]);
+            string_buffer_append(uploads->ownerId, data, dataLen, fit);
+        }
+        else if (!strcmp
+                 (elementPath,
+                  "ListMultipartUploadsResult/Upload/Owner/DisplayName")) {
+            ListMultipartUpload *uploads =
+                &(lmData->uploads[lmData->uploadsCount]);
+            string_buffer_append
+                (uploads->ownerDisplayName, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListMultipartUploadsResult/Upload/StorageClass")) {
+            ListMultipartUpload *uploads =
+                &(lmData->uploads[lmData->uploadsCount]);
+            string_buffer_append(uploads->storageClass, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListMultipartUploadsResult/CommonPrefixes/Prefix")) {
+            int which = lmData->commonPrefixesCount;
+            lmData->commonPrefixLens[which] +=
+                snprintf(lmData->commonPrefixes[which],
+                         sizeof(lmData->commonPrefixes[which]) -
+                         lmData->commonPrefixLens[which] - 1,
+                         "%.*s", dataLen, data);
+            if (lmData->commonPrefixLens[which] >=
+                (int) sizeof(lmData->commonPrefixes[which])) {
+                return S3StatusXmlParseFailure;
+            }
+        }
+    }
+    else {
+        if (!strcmp(elementPath, "ListMultipartUploadsResult/Upload")) {
+            // Finished a Contents
+            lmData->uploadsCount++;
+            if (lmData->uploadsCount == MAX_UPLOADS) {
+                // Make the callback
+                S3Status status = make_list_multipart_callback(lmData);
+                if (status != S3StatusOK) {
+                    return status;
+                }
+                initialize_list_multipart_data(lmData);
+            }
+            else {
+                // Initialize the next one
+                initialize_list_multipart_upload
+                    (&(lmData->uploads[lmData->uploadsCount]));
+            }
+        }
+        else if (!strcmp(elementPath,
+                         "ListMultipartUploadsResult/CommonPrefixes/Prefix")) {
+            // Finished a Prefix
+            lmData->commonPrefixesCount++;
+            if (lmData->commonPrefixesCount == MAX_COMMON_PREFIXES) {
+                // Make the callback
+                S3Status status = make_list_multipart_callback(lmData);
+                if (status != S3StatusOK) {
+                    return status;
+                }
+                initialize_list_multipart_data(lmData);
+            }
+            else {
+                // Initialize the next one
+                lmData->commonPrefixes[lmData->commonPrefixesCount][0] = 0;
+                lmData->commonPrefixLens[lmData->commonPrefixesCount] = 0;
+            }
+        }
+    }
+
+    /* Avoid compiler error about variable set but not used */
+    (void) fit;
+
+    return S3StatusOK;
+}
+
+
+static S3Status listPartsXmlCallback(const char *elementPath,
+                                      const char *data, int dataLen,
+                                      void *callbackData)
+{
+    ListPartsData *lpData = (ListPartsData *) callbackData;
+    int fit;
+    if (data) {
+        if (!strcmp(elementPath, "ListPartsResult/IsTruncated")) {
+            string_buffer_append(lpData->isTruncated, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListPartsResult/NextPartNumberMarker")) {
+            string_buffer_append(lpData->nextPartNumberMarker, data, dataLen,
+                                 fit);
+        }
+        else if (!strcmp(elementPath, "ListPartsResult/StorageClass")) {
+            string_buffer_append(lpData->storageClass, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "ListPartsResult/Initiator/ID")) {
+            string_buffer_append(lpData->initiatorId, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath,
+                         "ListPartsResult/Initiator/DisplayName")) {
+            string_buffer_append(lpData->initiatorDisplayName, data, dataLen,
+                                 fit);
+        }
+        else if (!strcmp(elementPath, "ListPartsResult/Owner/ID")) {
+            string_buffer_append(lpData->ownerId, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "ListPartsResult/Owner/DisplayName")) {
+            string_buffer_append(lpData->ownerDisplayName, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "ListPartsResult/Part/PartNumber")) {
+            ListPart *parts = &(lpData->parts[lpData->partsCount]);
+            string_buffer_append(parts->partNumber, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "ListPartsResult/Part/LastModified")) {
+            ListPart *parts = &(lpData->parts[lpData->partsCount]);
+            string_buffer_append(parts->lastModified, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "ListPartsResult/Part/ETag")) {
+            ListPart *parts = &(lpData->parts[lpData->partsCount]);
+            string_buffer_append(parts->eTag, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "ListPartsResult/Part/Size")) {
+            ListPart *parts = &(lpData->parts[lpData->partsCount]);
+            string_buffer_append(parts->size, data, dataLen, fit);
+        }
+    }
+    else {
+        if (!strcmp(elementPath, "ListPartsResult/Part")) {
+            // Finished a Contents
+            lpData->partsCount++;
+            if (lpData->partsCount == MAX_PARTS) {
+                // Make the callback
+                S3Status status = make_list_parts_callback(lpData);
+                if (status != S3StatusOK) {
+                    return status;
+                }
+                lpData->handlePartsStart += lpData->partsCount;
+                initialize_list_parts_data(lpData);
+            }
+            else {
+                // Initialize the next one
+                initialize_list_part(&(lpData->parts[lpData->partsCount]));
+            }
+        }
+    }
+
+    /* Avoid compiler error about variable set but not used */
+    (void) fit;
+
+    return S3StatusOK;
+}
+
+
// Initiates a GET "?uploads" request listing the in-progress multipart
// uploads of a bucket (S3 "List Multipart Uploads" operation).
//
// prefix / keymarker / uploadidmarker / encodingtype / delimiter are
// optional request parameters; each is appended to the query string only
// when non-NULL and non-empty.  maxuploads, when non-zero, is sent as
// "max-uploads".  Parsed results are streamed to
// handler->responseXmlCallback; handler->responseHandler.completeCallback
// receives the final status (and is also used to report early errors).
void S3_list_multipart_uploads(S3BucketContext *bucketContext,
                               const char *prefix, const char *keymarker,
                               const char *uploadidmarker,
                               const char *encodingtype, const char *delimiter,
                               int maxuploads, S3RequestContext *requestContext,
                               int timeoutMs,
                               const S3ListMultipartUploadsHandler *handler,
                               void *callbackData)
{
    // Compose the query params
    string_buffer(queryParams, 4096);
    string_buffer_initialize(queryParams);

// Appends "name=<url-encoded value>" to queryParams, preceded by '&' for
// every parameter after the first (tracked via the local 'amp' flag).
// Any overflow aborts the whole call: the complete callback is invoked
// with S3StatusQueryParamsTooLong and the enclosing function returns.
// NOTE(review): an identical safe_append macro is defined again in
// S3_list_parts below; identical redefinition is legal C, but an #undef
// at the end of each function would be cleaner.
#define safe_append(name, value)                                            \
        do {                                                                \
            int fit;                                                        \
            if (amp) {                                                      \
                string_buffer_append(queryParams, "&", 1, fit);             \
                if (!fit) {                                                 \
                    (*(handler->responseHandler.completeCallback))          \
                        (S3StatusQueryParamsTooLong, 0, callbackData);      \
                    return;                                                 \
                }                                                           \
            }                                                               \
            string_buffer_append(queryParams, name "=",                     \
                                 sizeof(name "=") - 1, fit);                \
            if (!fit) {                                                     \
                (*(handler->responseHandler.completeCallback))              \
                    (S3StatusQueryParamsTooLong, 0, callbackData);          \
                return;                                                     \
            }                                                               \
            amp = 1;                                                        \
            char encoded[3 * 1024];                                         \
            if (!urlEncode(encoded, value, 1024, 1)) {                      \
                (*(handler->responseHandler.completeCallback))              \
                    (S3StatusQueryParamsTooLong, 0, callbackData);          \
                return;                                                     \
            }                                                               \
            string_buffer_append(queryParams, encoded, strlen(encoded),     \
                                 fit);                                      \
            if (!fit) {                                                     \
                (*(handler->responseHandler.completeCallback))              \
                    (S3StatusQueryParamsTooLong, 0, callbackData);          \
                return;                                                     \
            }                                                               \
        } while (0)


        int amp = 0;   // becomes 1 once the first parameter is appended
        if (prefix && *prefix) {
            safe_append("prefix", prefix);
        }
        if (keymarker && *keymarker) {
            safe_append("key-marker", keymarker);
        }
        if (delimiter && *delimiter) {
            safe_append("delimiter", delimiter);
        }
        if (uploadidmarker && *uploadidmarker) {
            safe_append("upload-id-marker", uploadidmarker);
        }
        if (encodingtype && *encodingtype) {
            safe_append("encoding-type", encodingtype);
        }
        if (maxuploads) {
            char maxUploadsString[64];
            snprintf(maxUploadsString, sizeof(maxUploadsString), "%d",
                     maxuploads);
            safe_append("max-uploads", maxUploadsString);
        }

        // Allocate the callback context; freed by the complete callback
        // when the request finishes.
        ListMultipartData *lmData =
            (ListMultipartData *) malloc(sizeof(ListMultipartData));

        if (!lmData) {
            (*(handler->responseHandler.completeCallback))
                (S3StatusOutOfMemory, 0, callbackData);
            return;
        }

        // Incremental XML parser feeding listMultipartXmlCallback.
        simplexml_initialize(&(lmData->simpleXml), &listMultipartXmlCallback,
                             lmData);

        lmData->responsePropertiesCallback =
            handler->responseHandler.propertiesCallback;
        lmData->listMultipartCallback = handler->responseXmlCallback;
        lmData->responseCompleteCallback =
            handler->responseHandler.completeCallback;
        lmData->callbackData = callbackData;

        string_buffer_initialize(lmData->isTruncated);
        string_buffer_initialize(lmData->nextKeyMarker);
        string_buffer_initialize(lmData->nextUploadIdMarker);
        initialize_list_multipart_data(lmData);

        // Set up the RequestParams
        RequestParams params =
        {
            HttpRequestTypeGET,                      // httpRequestType
            { bucketContext->hostName,               // hostName
              bucketContext->bucketName,             // bucketName
              bucketContext->protocol,               // protocol
              bucketContext->uriStyle,               // uriStyle
              bucketContext->accessKeyId,            // accessKeyId
              bucketContext->secretAccessKey,        // secretAccessKey
              bucketContext->securityToken,          // securityToken
              bucketContext->authRegion },           // authRegion
            0,                                       // key
            queryParams[0] ? queryParams : 0,        // queryParams
            "uploads",                               // subResource
            0,                                       // copySourceBucketName
            0,                                       // copySourceKey
            0,                                       // getConditions
            0,                                       // startByte
            0,                                       // byteCount
            0,                                       // putProperties
            &listMultipartPropertiesCallback,        // propertiesCallback
            0,                                       // toS3Callback
            0,                                       // toS3CallbackTotalSize
            &listMultipartDataCallback,              // fromS3Callback
            &listMultipartCompleteCallback,          // completeCallback
            lmData,                                  // callbackData
            timeoutMs                                // timeoutMs
        };

        // Perform the request
        request_perform(&params, requestContext);
}
+
+
// Initiates a GET request listing the parts uploaded so far for one
// multipart upload (S3 "List Parts" operation).
//
// key and uploadid identify the multipart upload; uploadid is sent as the
// "uploadId=" sub-resource.  partnumbermarker and encodingtype are
// optional and appended only when non-NULL and non-empty; maxparts, when
// non-zero, is sent as "max-parts".  Parsed parts are delivered through
// handler->responseXmlCallback; handler->responseHandler.completeCallback
// receives the final status (and is also used to report early errors).
void S3_list_parts(S3BucketContext *bucketContext, const char *key,
                   const char *partnumbermarker, const char *uploadid,
                   const char *encodingtype, int maxparts,
                   S3RequestContext *requestContext,
                   int timeoutMs,
                   const S3ListPartsHandler *handler, void *callbackData)
{
    // Compose the query params
    string_buffer(queryParams, 4096);
    string_buffer_initialize(queryParams);

// Same helper as in S3_list_multipart_uploads: appends an url-encoded
// "name=value" pair to queryParams, or aborts the whole call with
// S3StatusQueryParamsTooLong on overflow.
#define safe_append(name, value)                                            \
        do {                                                                \
            int fit;                                                        \
            if (amp) {                                                      \
                string_buffer_append(queryParams, "&", 1, fit);             \
                if (!fit) {                                                 \
                    (*(handler->responseHandler.completeCallback))          \
                        (S3StatusQueryParamsTooLong, 0, callbackData);      \
                    return;                                                 \
                }                                                           \
            }                                                               \
            string_buffer_append(queryParams, name "=",                     \
                                 sizeof(name "=") - 1, fit);                \
            if (!fit) {                                                     \
                (*(handler->responseHandler.completeCallback))              \
                    (S3StatusQueryParamsTooLong, 0, callbackData);          \
                return;                                                     \
            }                                                               \
            amp = 1;                                                        \
            char encoded[3 * 1024];                                         \
            if (!urlEncode(encoded, value, 1024, 1)) {                      \
                (*(handler->responseHandler.completeCallback))              \
                    (S3StatusQueryParamsTooLong, 0, callbackData);          \
                return;                                                     \
            }                                                               \
            string_buffer_append(queryParams, encoded, strlen(encoded),     \
                                 fit);                                      \
            if (!fit) {                                                     \
                (*(handler->responseHandler.completeCallback))              \
                    (S3StatusQueryParamsTooLong, 0, callbackData);          \
                return;                                                     \
            }                                                               \
        } while (0)

        // Identify the multipart upload being listed.
        // NOTE(review): uploadid is assumed non-NULL here — formatting
        // NULL with %s is undefined behavior; confirm all callers pass a
        // valid upload id.
        char subResource[512];
        snprintf(subResource, 512, "uploadId=%s", uploadid);
        int amp = 0;   // becomes 1 once the first parameter is appended

        if (partnumbermarker && *partnumbermarker) {
            safe_append("part-number-marker", partnumbermarker);
        }
        if (encodingtype && *encodingtype) {
            safe_append("encoding-type", encodingtype);
        }
        if (maxparts) {
            char maxPartsString[64];
            snprintf(maxPartsString, sizeof(maxPartsString), "%d", maxparts);
            safe_append("max-parts", maxPartsString);
        }

        // Allocate the callback context; freed by the complete callback
        // when the request finishes.
        ListPartsData *lpData =
            (ListPartsData *) malloc(sizeof(ListPartsData));

        if (!lpData) {
            (*(handler->responseHandler.completeCallback))
                (S3StatusOutOfMemory, 0, callbackData);
            return;
        }

        // Incremental XML parser feeding listPartsXmlCallback.
        simplexml_initialize(&(lpData->simpleXml), &listPartsXmlCallback,
                             lpData);

        lpData->responsePropertiesCallback =
            handler->responseHandler.propertiesCallback;
        lpData->listPartsCallback = handler->responseXmlCallback;
        lpData->responseCompleteCallback =
            handler->responseHandler.completeCallback;
        lpData->callbackData = callbackData;

        string_buffer_initialize(lpData->isTruncated);
        string_buffer_initialize(lpData->nextPartNumberMarker);
        string_buffer_initialize(lpData->initiatorId);
        string_buffer_initialize(lpData->initiatorDisplayName);
        string_buffer_initialize(lpData->ownerId);
        string_buffer_initialize(lpData->ownerDisplayName);
        string_buffer_initialize(lpData->storageClass);
        initialize_list_parts_data(lpData);
        // Running count of parts already reported in earlier callback
        // batches (advanced in listPartsXmlCallback after each flush).
        lpData->handlePartsStart = 0;
        // Set up the RequestParams
        RequestParams params =
        {
            HttpRequestTypeGET,                      // httpRequestType
            { bucketContext->hostName,               // hostName
              bucketContext->bucketName,             // bucketName
              bucketContext->protocol,               // protocol
              bucketContext->uriStyle,               // uriStyle
              bucketContext->accessKeyId,            // accessKeyId
              bucketContext->secretAccessKey,        // secretAccessKey
              bucketContext->securityToken,          // securityToken
              bucketContext->authRegion },           // authRegion
            key,                                     // key
            queryParams[0] ? queryParams : 0,        // queryParams
            subResource,                             // subResource
            0,                                       // copySourceBucketName
            0,                                       // copySourceKey
            0,                                       // getConditions
            0,                                       // startByte
            0,                                       // byteCount
            0,                                       // putProperties
            &listPartsPropertiesCallback,            // propertiesCallback
            0,                                       // toS3Callback
            0,                                       // toS3CallbackTotalSize
            &listPartsDataCallback,                  // fromS3Callback
            &listPartsCompleteCallback,              // completeCallback
            lpData,                                  // callbackData
            timeoutMs                                // timeoutMs
        };

        // Perform the request
        request_perform(&params, requestContext);
}

+ 397 - 0
libs/libs3/src/object.c

@@ -0,0 +1,397 @@
+/** **************************************************************************
+ * object.c
+ *
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ *
+ * This file is part of libs3.
+ *
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <stdlib.h>
+#include <string.h>
+#include "libs3.h"
+#include "request.h"
+
+
+// put object ----------------------------------------------------------------
+
// Initiates a PUT of a single object.  The object body is pulled from
// handler->putObjectDataCallback until contentLength bytes have been
// supplied; putProperties (may be NULL) carries the request properties
// (Content-Type, ACL, metadata, ...).  Completion status arrives via
// handler->responseHandler.completeCallback.
void S3_put_object(const S3BucketContext *bucketContext, const char *key,
                   uint64_t contentLength,
                   const S3PutProperties *putProperties,
                   S3RequestContext *requestContext,
                   int timeoutMs,
                   const S3PutObjectHandler *handler, void *callbackData)
{
    // Set up the RequestParams
    RequestParams params =
    {
        HttpRequestTypePUT,                           // httpRequestType
        { bucketContext->hostName,                    // hostName
          bucketContext->bucketName,                  // bucketName
          bucketContext->protocol,                    // protocol
          bucketContext->uriStyle,                    // uriStyle
          bucketContext->accessKeyId,                 // accessKeyId
          bucketContext->secretAccessKey,             // secretAccessKey
          bucketContext->securityToken,               // securityToken
          bucketContext->authRegion },                // authRegion
        key,                                          // key
        0,                                            // queryParams
        0,                                            // subResource
        0,                                            // copySourceBucketName
        0,                                            // copySourceKey
        0,                                            // getConditions
        0,                                            // startByte
        0,                                            // byteCount
        putProperties,                                // putProperties
        handler->responseHandler.propertiesCallback,  // propertiesCallback
        handler->putObjectDataCallback,               // toS3Callback
        contentLength,                                // toS3CallbackTotalSize
        0,                                            // fromS3Callback
        handler->responseHandler.completeCallback,    // completeCallback
        callbackData,                                 // callbackData
        timeoutMs                                     // timeoutMs
    };

    // Perform the request
    request_perform(&params, requestContext);
}
+
+
+// copy object ---------------------------------------------------------------
+
+
// Callback context for S3_copy_object_range: accumulates the
// <CopyObjectResult> response while the request is in flight.  Allocated
// in S3_copy_object_range and freed in copyObjectCompleteCallback.
typedef struct CopyObjectData
{
    SimpleXml simpleXml;    // incremental parser for the XML response body

    // User callbacks (and their data) that results are forwarded to.
    S3ResponsePropertiesCallback *responsePropertiesCallback;
    S3ResponseCompleteCallback *responseCompleteCallback;
    void *callbackData;

    // Optional out-parameters supplied by the caller.
    int64_t *lastModifiedReturn;   // receives parsed LastModified, or -1
    int eTagReturnSize;            // capacity of eTagReturn; 0 = not wanted
    char *eTagReturn;              // receives the ETag text
    int eTagReturnLen;             // bytes written into eTagReturn so far

    // Raw ISO-8601 <LastModified> text collected from the XML.
    string_buffer(lastModified, 256);
} CopyObjectData;
+
+
+static S3Status copyObjectXmlCallback(const char *elementPath,
+                                      const char *data, int dataLen,
+                                      void *callbackData)
+{
+    CopyObjectData *coData = (CopyObjectData *) callbackData;
+
+    int fit;
+
+    if (data) {
+        if (!strcmp(elementPath, "CopyObjectResult/LastModified")) {
+            string_buffer_append(coData->lastModified, data, dataLen, fit);
+        }
+        else if (!strcmp(elementPath, "CopyObjectResult/ETag")) {
+            if (coData->eTagReturnSize && coData->eTagReturn) {
+                coData->eTagReturnLen +=
+                    snprintf(&(coData->eTagReturn[coData->eTagReturnLen]),
+                             coData->eTagReturnSize -
+                             coData->eTagReturnLen - 1,
+                             "%.*s", dataLen, data);
+                if (coData->eTagReturnLen >= coData->eTagReturnSize) {
+                    return S3StatusXmlParseFailure;
+                }
+            }
+        }
+    }
+
+    /* Avoid compiler error about variable set but not used */
+    (void) fit;
+
+    return S3StatusOK;
+}
+
+
+static S3Status copyObjectPropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    CopyObjectData *coData = (CopyObjectData *) callbackData;
+
+    return (*(coData->responsePropertiesCallback))
+        (responseProperties, coData->callbackData);
+}
+
+
+static S3Status copyObjectDataCallback(int bufferSize, const char *buffer,
+                                       void *callbackData)
+{
+    CopyObjectData *coData = (CopyObjectData *) callbackData;
+
+    return simplexml_add(&(coData->simpleXml), buffer, bufferSize);
+}
+
+
+static void copyObjectCompleteCallback(S3Status requestStatus,
+                                       const S3ErrorDetails *s3ErrorDetails,
+                                       void *callbackData)
+{
+    CopyObjectData *coData = (CopyObjectData *) callbackData;
+
+    if (coData->lastModifiedReturn) {
+        time_t lastModified = -1;
+        if (coData->lastModifiedLen) {
+            lastModified = parseIso8601Time(coData->lastModified);
+        }
+
+        *(coData->lastModifiedReturn) = lastModified;
+    }
+
+    (*(coData->responseCompleteCallback))
+        (requestStatus, s3ErrorDetails, coData->callbackData);
+
+    simplexml_deinitialize(&(coData->simpleXml));
+
+    free(coData);
+}
+
+
+void S3_copy_object(const S3BucketContext *bucketContext, const char *key,
+                    const char *destinationBucket, const char *destinationKey,
+                    const S3PutProperties *putProperties,
+                    int64_t *lastModifiedReturn, int eTagReturnSize,
+                    char *eTagReturn, S3RequestContext *requestContext,
+                    int timeoutMs,
+                    const S3ResponseHandler *handler, void *callbackData)
+{
+    /* Use the range copier with 0 length */
+    S3_copy_object_range(bucketContext, key,
+                         destinationBucket, destinationKey,
+                         0, NULL, // No multipart
+                         0, 0, // No length => std. copy of < 5GB
+                         putProperties,
+                         lastModifiedReturn, eTagReturnSize,
+                         eTagReturn, requestContext,
+                         timeoutMs,
+                         handler, callbackData);
+}
+
+
// Server-side copy of an object, or of a byte range of it.  With
// partNo > 0 (and a matching uploadId) the query string carries
// "partNumber=...&uploadId=...", i.e. an "Upload Part - Copy" request
// writing the copy into that part of a multipart upload; with partNo == 0
// it is a plain object copy.
//
// destinationBucket/destinationKey default to the source bucket/key when
// NULL.  startOffset/count select the byte range; both are passed through
// as startByte/byteCount.
// NOTE(review): startOffset/count are 'unsigned long', which is 32 bits
// on some platforms — ranges beyond 4GB would be truncated there; confirm
// against the RequestParams startByte/byteCount types.
// lastModifiedReturn and eTagReturn[eTagReturnSize] are optional
// out-params filled from the <CopyObjectResult> response before the
// complete callback is invoked.
void S3_copy_object_range(const S3BucketContext *bucketContext, const char *key,
                          const char *destinationBucket,
                          const char *destinationKey, const int partNo,
                          const char *uploadId, const unsigned long startOffset,
                          const unsigned long count,
                          const S3PutProperties *putProperties,
                          int64_t *lastModifiedReturn, int eTagReturnSize,
                          char *eTagReturn, S3RequestContext *requestContext,
                          int timeoutMs,
                          const S3ResponseHandler *handler, void *callbackData)
{
    // Create the callback data (freed in copyObjectCompleteCallback)
    CopyObjectData *data =
        (CopyObjectData *) malloc(sizeof(CopyObjectData));
    if (!data) {
        (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
        return;
    }

    simplexml_initialize(&(data->simpleXml), &copyObjectXmlCallback, data);

    data->responsePropertiesCallback = handler->propertiesCallback;
    data->responseCompleteCallback = handler->completeCallback;
    data->callbackData = callbackData;

    data->lastModifiedReturn = lastModifiedReturn;
    data->eTagReturnSize = eTagReturnSize;
    data->eTagReturn = eTagReturn;
    if (data->eTagReturnSize && data->eTagReturn) {
        data->eTagReturn[0] = 0;
    }
    data->eTagReturnLen = 0;
    string_buffer_initialize(data->lastModified);

    // If there's a sequence ID > 0 then add a subResource, OTW pass in NULL
    // NOTE(review): when partNo > 0, uploadId is assumed non-NULL —
    // formatting NULL with %s is undefined behavior; verify callers.
    char queryParams[512];
    char *qp = NULL;
    if (partNo > 0) {
        snprintf(queryParams, 512, "partNumber=%d&uploadId=%s", partNo, uploadId);
        qp = queryParams;
    }

    // Set up the RequestParams
    RequestParams params =
    {
        HttpRequestTypeCOPY,                          // httpRequestType
        { bucketContext->hostName,                    // hostName
          destinationBucket ? destinationBucket :
          bucketContext->bucketName,                  // bucketName
          bucketContext->protocol,                    // protocol
          bucketContext->uriStyle,                    // uriStyle
          bucketContext->accessKeyId,                 // accessKeyId
          bucketContext->secretAccessKey,             // secretAccessKey
          bucketContext->securityToken,               // securityToken
          bucketContext->authRegion },                // authRegion
        destinationKey ? destinationKey : key,        // key
        qp,                                           // queryParams
        0,                                            // subResource
        bucketContext->bucketName,                    // copySourceBucketName
        key,                                          // copySourceKey
        0,                                            // getConditions
        startOffset,                                  // startByte
        count,                                        // byteCount
        putProperties,                                // putProperties
        &copyObjectPropertiesCallback,                // propertiesCallback
        0,                                            // toS3Callback
        0,                                            // toS3CallbackTotalSize
        &copyObjectDataCallback,                      // fromS3Callback
        &copyObjectCompleteCallback,                  // completeCallback
        data,                                         // callbackData
        timeoutMs                                     // timeoutMs
    };

    // Perform the request
    request_perform(&params, requestContext);
}
+
+
+// get object ----------------------------------------------------------------
+
// Initiates a GET of an object's data.  getConditions (may be NULL)
// supplies optional conditional-GET constraints; startByte/byteCount
// select a byte range (byteCount == 0 presumably means "through the end
// of the object" — confirm in request.c).  Data is streamed to
// handler->getObjectDataCallback; completion status arrives via
// handler->responseHandler.completeCallback.
void S3_get_object(const S3BucketContext *bucketContext, const char *key,
                   const S3GetConditions *getConditions,
                   uint64_t startByte, uint64_t byteCount,
                   S3RequestContext *requestContext,
                   int timeoutMs,
                   const S3GetObjectHandler *handler, void *callbackData)
{
    // Set up the RequestParams
    RequestParams params =
    {
        HttpRequestTypeGET,                           // httpRequestType
        { bucketContext->hostName,                    // hostName
          bucketContext->bucketName,                  // bucketName
          bucketContext->protocol,                    // protocol
          bucketContext->uriStyle,                    // uriStyle
          bucketContext->accessKeyId,                 // accessKeyId
          bucketContext->secretAccessKey,             // secretAccessKey
          bucketContext->securityToken,               // securityToken
          bucketContext->authRegion },                // authRegion
        key,                                          // key
        0,                                            // queryParams
        0,                                            // subResource
        0,                                            // copySourceBucketName
        0,                                            // copySourceKey
        getConditions,                                // getConditions
        startByte,                                    // startByte
        byteCount,                                    // byteCount
        0,                                            // putProperties
        handler->responseHandler.propertiesCallback,  // propertiesCallback
        0,                                            // toS3Callback
        0,                                            // toS3CallbackTotalSize
        handler->getObjectDataCallback,               // fromS3Callback
        handler->responseHandler.completeCallback,    // completeCallback
        callbackData,                                 // callbackData
        timeoutMs                                     // timeoutMs
    };

    // Perform the request
    request_perform(&params, requestContext);
}
+
+
+// head object ---------------------------------------------------------------
+
+// Issues an HTTP HEAD for the object named by key: retrieves only the
+// response properties (no body -- fromS3Callback is 0).  Useful for checking
+// existence and reading metadata without transferring object data.
+void S3_head_object(const S3BucketContext *bucketContext, const char *key,
+                    S3RequestContext *requestContext,
+                    int timeoutMs,
+                    const S3ResponseHandler *handler, void *callbackData)
+{
+    // Set up the RequestParams.  The initializer is positional; each trailing
+    // comment names the RequestParams field the value populates.
+    RequestParams params =
+    {
+        HttpRequestTypeHEAD,                          // httpRequestType
+        { bucketContext->hostName,                    // hostName
+          bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        key,                                          // key
+        0,                                            // queryParams
+        0,                                            // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        0,                                            // putProperties
+        handler->propertiesCallback,                  // propertiesCallback
+        0,                                            // toS3Callback
+        0,                                            // toS3CallbackTotalSize
+        0,                                            // fromS3Callback
+        handler->completeCallback,                    // completeCallback
+        callbackData,                                 // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+}
+
+
+// delete object --------------------------------------------------------------
+
+// Issues an HTTP DELETE for the object named by key in bucketContext's
+// bucket.  No request or response body is exchanged; the outcome is
+// reported through handler->completeCallback.
+void S3_delete_object(const S3BucketContext *bucketContext, const char *key,
+                      S3RequestContext *requestContext,
+                      int timeoutMs,
+                      const S3ResponseHandler *handler, void *callbackData)
+{
+    // Set up the RequestParams.  The initializer is positional; each trailing
+    // comment names the RequestParams field the value populates.
+    RequestParams params =
+    {
+        HttpRequestTypeDELETE,                        // httpRequestType
+        { bucketContext->hostName,                    // hostName
+          bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        key,                                          // key
+        0,                                            // queryParams
+        0,                                            // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        0,                                            // putProperties
+        handler->propertiesCallback,                  // propertiesCallback
+        0,                                            // toS3Callback
+        0,                                            // toS3CallbackTotalSize
+        0,                                            // fromS3Callback
+        handler->completeCallback,                    // completeCallback
+        callbackData,                                 // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+}

+ 1754 - 0
libs/libs3/src/request.c

@@ -0,0 +1,1754 @@
+/** **************************************************************************
+ * request.c
+ *
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ *
+ * This file is part of libs3.
+ *
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <ctype.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/utsname.h>
+#include <libxml/parser.h>
+#include "request.h"
+#include "request_context.h"
+#include "response_headers_handler.h"
+
+#ifdef __APPLE__
+#include <CommonCrypto/CommonHMAC.h>
+#define S3_SHA256_DIGEST_LENGTH CC_SHA256_DIGEST_LENGTH
+#else
+#include <openssl/hmac.h>
+#include <openssl/sha.h>
+#define S3_SHA256_DIGEST_LENGTH SHA256_DIGEST_LENGTH
+#endif
+
+#define USER_AGENT_SIZE 256
+#define REQUEST_STACK_SIZE 32
+#define SIGNATURE_SCOPE_SIZE 64
+
+//#define SIGNATURE_DEBUG
+
+// Whether libcurl should verify the peer's SSL certificate.
+// NOTE(review): presumably set during library initialization -- not
+// assigned anywhere in this chunk; confirm in request_api_initialize.
+static int verifyPeer;
+
+// User-Agent header value (presumably composed once at initialization)
+static char userAgentG[USER_AGENT_SIZE];
+
+// Protects requestStackG / requestStackCountG
+static pthread_mutex_t requestStackMutexG;
+
+// Pool of Request structures cached for reuse between requests
+static Request *requestStackG[REQUEST_STACK_SIZE];
+
+// Number of entries currently on requestStackG
+static int requestStackCountG;
+
+// Default S3 host name, used when a bucket context supplies none
+char defaultHostNameG[S3_MAX_HOSTNAME_SIZE];
+
+
+// Values computed from a RequestParams before the HTTP request is issued:
+// the normalized x-amz- headers, the canonicalized strings consumed by the
+// AWS request-signing code, and the composed standard HTTP headers.  One
+// instance is filled in per request.
+typedef struct RequestComputedValues
+{
+    // All x-amz- headers, in normalized form (i.e. NAME: VALUE, no other ws);
+    // each entry points into amzHeadersRaw below
+    char *amzHeaders[S3_MAX_METADATA_COUNT + 2]; // + 2 for acl and date
+
+    // The number of x-amz- headers
+    int amzHeadersCount;
+
+    // Storage for amzHeaders (the +256 is for x-amz-acl and x-amz-date)
+    char amzHeadersRaw[COMPACTED_METADATA_BUFFER_SIZE + 256 + 1];
+
+    // Length of populated data in raw buffer
+    int amzHeadersRawLength;
+
+    // Canonicalized headers for signature
+    string_multibuffer(canonicalizedSignatureHeaders,
+                       COMPACTED_METADATA_BUFFER_SIZE + 256 + 1);
+
+    // Delimited list of header names used for signature
+    char signedHeaders[COMPACTED_METADATA_BUFFER_SIZE];
+
+    // URL-Encoded key
+    char urlEncodedKey[MAX_URLENCODED_KEY_SIZE + 1];
+
+    // Canonicalized resource
+    char canonicalURI[MAX_CANONICALIZED_RESOURCE_SIZE + 1];
+
+    // Canonical sub-resource & query string
+    char canonicalQueryString[MAX_CANONICALIZED_RESOURCE_SIZE + 1];
+
+    // Cache-Control header (or empty)
+    char cacheControlHeader[128];
+
+    // Content-Type header (or empty)
+    char contentTypeHeader[128];
+
+    // Content-MD5 header (or empty)
+    char md5Header[128];
+
+    // Content-Disposition header (or empty)
+    char contentDispositionHeader[128];
+
+    // Content-Encoding header (or empty)
+    char contentEncodingHeader[128];
+
+    // Expires header (or empty)
+    char expiresHeader[128];
+
+    // If-Modified-Since header
+    char ifModifiedSinceHeader[128];
+
+    // If-Unmodified-Since header
+    char ifUnmodifiedSinceHeader[128];
+
+    // If-Match header
+    char ifMatchHeader[128];
+
+    // If-None-Match header
+    char ifNoneMatchHeader[128];
+
+    // Range header
+    char rangeHeader[128];
+
+    // Authorization header
+    char authorizationHeader[1024];
+
+    // Request date stamp (ISO-8601, used for x-amz-date)
+    char requestDateISO8601[64];
+
+    // Credential used for authorization signature
+    char authCredential[MAX_CREDENTIAL_SIZE + 1];
+
+    // Computed request signature (hex string)
+    char requestSignatureHex[S3_SHA256_DIGEST_LENGTH * 2 + 1];
+
+    // Host header
+    char hostHeader[128];
+
+    // Hex string of hash of request payload
+    char payloadHash[S3_SHA256_DIGEST_LENGTH * 2 + 1];
+} RequestComputedValues;
+
+
+// Called whenever we detect that the request headers have been completely
+// processed; which happens either when we get our first read/write callback,
+// or the request is finished being processed.  Idempotent: only the first
+// call per request does anything.  Latches the HTTP status code, finalizes
+// response-header parsing, and -- for 2xx responses only -- invokes the
+// caller's properties callback.  Failures are reported via request->status
+// (the function itself returns void).
+static void request_headers_done(Request *request)
+{
+    // Guard: make the properties callback at most once per request
+    if (request->propertiesCallbackMade) {
+        return;
+    }
+
+    request->propertiesCallbackMade = 1;
+
+    // Get the http response code
+    long httpResponseCode;
+    request->httpResponseCode = 0;
+    if (curl_easy_getinfo(request->curl, CURLINFO_RESPONSE_CODE,
+                          &httpResponseCode) != CURLE_OK) {
+        // Not able to get the HTTP response code - error
+        request->status = S3StatusInternalError;
+        return;
+    }
+    else {
+        request->httpResponseCode = httpResponseCode;
+    }
+
+    // Finish accumulating/parsing the response headers
+    response_headers_handler_done(&(request->responseHeadersHandler),
+                                  request->curl);
+
+    // Only make the callback if it was a successful request; otherwise we're
+    // returning information about the error response itself
+    if (request->propertiesCallback &&
+        (request->httpResponseCode >= 200) &&
+        (request->httpResponseCode <= 299)) {
+        request->status = (*(request->propertiesCallback))
+            (&(request->responseHeadersHandler.responseProperties),
+             request->callbackData);
+    }
+}
+
+
+// curl header callback: forwards each received response-header chunk to the
+// response-header parser.  libcurl expects the number of bytes consumed,
+// which is always the full chunk.
+static size_t curl_header_func(void *ptr, size_t size, size_t nmemb,
+                               void *data)
+{
+    Request *req = (Request *) data;
+    int chunkLen = size * nmemb;
+
+    response_headers_handler_add(&(req->responseHeadersHandler),
+                                 (char *) ptr, chunkLen);
+
+    return chunkLen;
+}
+
+
+// curl read callback: supplies outgoing request-body bytes to libcurl by
+// pulling them from the caller's toS3Callback, bounded by
+// toS3CallbackBytesRemaining.  Returns 0 for end-of-body and
+// CURL_READFUNC_ABORT to cancel the transfer on error.
+static size_t curl_read_func(void *ptr, size_t size, size_t nmemb, void *data)
+{
+    Request *request = (Request *) data;
+
+    // NOTE(review): size * nmemb is narrowed to int; fine for libcurl's
+    // default buffer sizes, but confirm no custom CURLOPT_BUFFERSIZE can
+    // exceed INT_MAX.
+    int len = size * nmemb;
+
+    // CURL may call this function before response headers are available,
+    // so don't assume response headers are available and attempt to parse
+    // them.  Leave that to curl_write_func, which is guaranteed to be called
+    // only after headers are available.
+
+    if (request->status != S3StatusOK) {
+        return CURL_READFUNC_ABORT;
+    }
+
+    // If there is no data callback, or the data callback has already returned
+    // contentLength bytes, return 0;
+    if (!request->toS3Callback || !request->toS3CallbackBytesRemaining) {
+        return 0;
+    }
+
+    // Don't tell the callback that we are willing to accept more data than we
+    // really are
+    if (len > request->toS3CallbackBytesRemaining) {
+        len = request->toS3CallbackBytesRemaining;
+    }
+
+    // Otherwise, make the data callback
+    int ret = (*(request->toS3Callback))
+        (len, (char *) ptr, request->callbackData);
+    if (ret < 0) {
+        // Negative return from the callback means "abort the transfer"
+        request->status = S3StatusAbortedByCallback;
+        return CURL_READFUNC_ABORT;
+    }
+    else {
+        // Clamp the callback's claim so the remaining-bytes counter can
+        // never go negative
+        if (ret > request->toS3CallbackBytesRemaining) {
+            ret = request->toS3CallbackBytesRemaining;
+        }
+        request->toS3CallbackBytesRemaining -= ret;
+        return ret;
+    }
+}
+
+
+// curl write callback: receives response-body data.  On 2xx responses the
+// bytes are forwarded to the caller's fromS3Callback; on any other status
+// they are fed to the error parser so S3's error XML can be reported.
+// Returning a short count (0) makes libcurl abort the transfer.
+static size_t curl_write_func(void *ptr, size_t size, size_t nmemb,
+                              void *data)
+{
+    Request *request = (Request *) data;
+
+    int len = size * nmemb;
+
+    // Body data implies all headers have arrived; latch the response code
+    // and make the properties callback if not already done
+    request_headers_done(request);
+
+    if (request->status != S3StatusOK) {
+        return 0;
+    }
+
+    // On HTTP error, we expect to parse an HTTP error response
+    if ((request->httpResponseCode < 200) ||
+        (request->httpResponseCode > 299)) {
+        request->status = error_parser_add
+            (&(request->errorParser), (char *) ptr, len);
+    }
+    // If there was a callback registered, make it
+    else if (request->fromS3Callback) {
+        request->status = (*(request->fromS3Callback))
+            (len, (char *) ptr, request->callbackData);
+    }
+    // Else, consider this an error - S3 has sent back data when it was not
+    // expected
+    else {
+        request->status = S3StatusInternalError;
+    }
+
+    return ((request->status == S3StatusOK) ? len : 0);
+}
+
+
+// Appends one normalized "name: value" header to values->amzHeadersRaw and
+// registers a pointer to it in values->amzHeaders.  The name is lower-cased;
+// trailing blanks are trimmed from the value.  When addPrefix is nonzero the
+// name is prefixed with S3_METADATA_HEADER_NAME_PREFIX (x-amz-meta-).
+// Returns S3StatusMetaDataHeadersTooLong if the header does not fit.
+static S3Status append_amz_header(RequestComputedValues *values,
+                                  int addPrefix,
+                                  const char *headerName,
+                                  const char *headerValue)
+{
+    // This buffer must live at function scope: headerStr may point into it
+    // for the remainder of the function.  (Bug fix: it was previously
+    // declared inside the if-block below, leaving headerStr dangling once
+    // that block ended -- undefined behavior.)
+    char headerNameWithPrefix[S3_MAX_METADATA_SIZE - sizeof(": v")];
+
+    const char *headerStr = headerName;
+    if (addPrefix) {
+        snprintf(headerNameWithPrefix, sizeof(headerNameWithPrefix),
+                 S3_METADATA_HEADER_NAME_PREFIX "%s", headerName);
+        headerStr = headerNameWithPrefix;
+    }
+
+    // Make sure the new header (plus ": " plus string terminator) will fit
+    // in the buffer.  Checked before committing a slot in amzHeaders so a
+    // failure leaves values untouched (previously the slot and count were
+    // updated before this check).
+    if ((values->amzHeadersRawLength + strlen(headerStr) + strlen(headerValue)
+        + 3) >= sizeof(values->amzHeadersRaw)) {
+        return S3StatusMetaDataHeadersTooLong;
+    }
+
+    // Headers are packed back-to-back in amzHeadersRaw, each starting just
+    // past the terminating '\0' of the previous one (hence the +1)
+    int rawPos = values->amzHeadersRawLength + 1;
+    values->amzHeaders[values->amzHeadersCount++] =
+        &(values->amzHeadersRaw[rawPos]);
+
+    // Copy the name in lower case (the signature algorithm canonicalizes
+    // header names to lower case)
+    unsigned long i = 0;
+    for (; i < strlen(headerStr); i++) {
+        values->amzHeadersRaw[rawPos++] = tolower(headerStr[i]);
+    }
+
+    snprintf(&(values->amzHeadersRaw[rawPos]), 3, ": ");
+    rawPos += 2;
+
+    for (i = 0; i < strlen(headerValue); i++) {
+        values->amzHeadersRaw[rawPos++] = headerValue[i];
+    }
+    rawPos--;
+
+    // Trim trailing blanks from the value
+    while (isblank(values->amzHeadersRaw[rawPos])) {
+        rawPos--;
+    }
+    values->amzHeadersRaw[++rawPos] = '\0';
+    values->amzHeadersRawLength = rawPos;
+    return S3StatusOK;
+}
+
+// This function 'normalizes' all x-amz-meta headers provided in
+// params->putProperties, which means it removes all whitespace from
+// them such that they all look exactly like this:
+// x-amz-meta-${NAME}: ${VALUE}
+// It also adds the x-amz-acl, x-amz-copy-source, x-amz-metadata-directive,
+// and x-amz-server-side-encryption headers if necessary, and always adds the
+// x-amz-date and x-amz-content-sha256 headers.  It copies the raw string
+// values into values->amzHeadersRaw, and creates an array of string pointers
+// representing these headers in values->amzHeaders (and also sets
+// values->amzHeadersCount to be the count of the total number of x-amz-
+// headers thus created).
+static S3Status compose_amz_headers(const RequestParams *params,
+                                    int forceUnsignedPayload,
+                                    RequestComputedValues *values)
+{
+    const S3PutProperties *properties = params->putProperties;
+
+    values->amzHeadersCount = 0;
+    values->amzHeadersRaw[0] = '\0';
+    values->amzHeadersRawLength = 0;
+
+    // Check and copy in the x-amz-meta headers
+    if (properties) {
+        int i;
+        for (i = 0; i < properties->metaDataCount; i++) {
+            const S3NameValue *property = &(properties->metaData[i]);
+            append_amz_header(values, 1, property->name, property->value);
+        }
+
+        // Add the x-amz-acl header, if necessary
+        const char *cannedAclString;
+        switch (properties->cannedAcl) {
+        case S3CannedAclPrivate:
+            // private is S3's default, so no header is needed
+            cannedAclString = NULL;
+            break;
+        case S3CannedAclPublicRead:
+            cannedAclString = "public-read";
+            break;
+        case S3CannedAclPublicReadWrite:
+            cannedAclString = "public-read-write";
+            break;
+        default: // S3CannedAclAuthenticatedRead
+            cannedAclString = "authenticated-read";
+            break;
+        }
+        if (cannedAclString) {
+            append_amz_header(values, 0, "x-amz-acl", cannedAclString);
+        }
+
+        // Add the x-amz-server-side-encryption header, if necessary
+        if (properties->useServerSideEncryption) {
+            append_amz_header(values, 0, "x-amz-server-side-encryption",
+                              "AES256");
+        }
+    }
+
+    // Add the x-amz-date header
+    append_amz_header(values, 0, "x-amz-date", values->requestDateISO8601);
+
+    if (params->httpRequestType == HttpRequestTypeCOPY) {
+        // Add the x-amz-copy-source header
+        if (params->copySourceBucketName && params->copySourceBucketName[0]
+            && params->copySourceKey && params->copySourceKey[0]) {
+            char bucketKey[S3_MAX_METADATA_SIZE];
+            snprintf(bucketKey, sizeof(bucketKey), "/%s/%s",
+                     params->copySourceBucketName, params->copySourceKey);
+            append_amz_header(values, 0, "x-amz-copy-source", bucketKey);
+        }
+        // If byteCount != 0 then we're just copying a range, add header.
+        // Bug fixes: (1) %zd is the conversion for ssize_t and is wrong for
+        // these uint64_t values (undefined behavior on 32-bit platforms);
+        // use %llu with explicit casts.  (2) x-amz-copy-source-range takes
+        // an inclusive byte range ("bytes=first-last"), so the end must be
+        // startByte + byteCount - 1, consistent with the Range header built
+        // in compose_standard_headers.
+        if (params->byteCount > 0) {
+            char byteRange[S3_MAX_METADATA_SIZE];
+            snprintf(byteRange, sizeof(byteRange), "bytes=%llu-%llu",
+                     (unsigned long long) params->startByte,
+                     (unsigned long long) (params->startByte +
+                                           params->byteCount - 1));
+            append_amz_header(values, 0, "x-amz-copy-source-range", byteRange);
+        }
+        // And the x-amz-metadata-directive header
+        if (properties) {
+            append_amz_header(values, 0, "x-amz-metadata-directive", "REPLACE");
+        }
+    }
+
+    // Add the x-amz-security-token header if necessary
+    if (params->bucketContext.securityToken) {
+        append_amz_header(values, 0, "x-amz-security-token",
+                          params->bucketContext.securityToken);
+    }
+
+    // Request types that carry no body are signed with the SHA-256 of the
+    // empty string; anything else is marked UNSIGNED-PAYLOAD
+    if (!forceUnsignedPayload
+        && (params->httpRequestType == HttpRequestTypeGET
+            || params->httpRequestType == HttpRequestTypeCOPY
+            || params->httpRequestType == HttpRequestTypeDELETE
+            || params->httpRequestType == HttpRequestTypeHEAD)) {
+        // empty payload
+        unsigned char md[S3_SHA256_DIGEST_LENGTH];
+#ifdef __APPLE__
+        CC_SHA256("", 0, md);
+#else
+        SHA256((const unsigned char*) "", 0, md);
+#endif
+        // Hex-encode the digest into payloadHash
+        values->payloadHash[0] = '\0';
+        int i = 0;
+        for (; i < S3_SHA256_DIGEST_LENGTH; i++) {
+            snprintf(&(values->payloadHash[i * 2]), 3, "%02x", md[i]);
+        }
+    }
+    else {
+        // TODO: figure out how to manage signed payloads
+        strcpy(values->payloadHash, "UNSIGNED-PAYLOAD");
+    }
+
+    append_amz_header(values, 0, "x-amz-content-sha256",
+                      values->payloadHash);
+
+    return S3StatusOK;
+}
+
+
+// Composes the other headers: Host, the standard HTTP headers taken from
+// params->putProperties (Cache-Control, Content-Type, Content-MD5,
+// Content-Disposition, Content-Encoding, Expires), the conditional headers
+// taken from params->getConditions (If-Modified-Since, If-Unmodified-Since,
+// If-Match, If-None-Match), and the Range header.  Values are trimmed of
+// leading/trailing blanks; an all-blank value yields the "bad" status and
+// an over-long composed header yields the "too long" status.
+static S3Status compose_standard_headers(const RequestParams *params,
+                                         RequestComputedValues *values)
+{
+
+// Helper macro: compose one header from a putProperties field into the
+// given destField, returning badError for an all-blank value and
+// tooLongError if the result would not fit.
+#define do_put_header(fmt, sourceField, destField, badError, tooLongError)  \
+    do {                                                                    \
+        if (params->putProperties &&                                        \
+            params->putProperties-> sourceField &&                          \
+            params->putProperties-> sourceField[0]) {                       \
+            /* Skip whitespace at beginning of val */                       \
+            const char *val = params->putProperties-> sourceField;          \
+            while (*val && is_blank(*val)) {                                \
+                val++;                                                      \
+            }                                                               \
+            if (!*val) {                                                    \
+                return badError;                                            \
+            }                                                               \
+            /* Compose header, make sure it all fit */                      \
+            int len = snprintf(values-> destField,                          \
+                               sizeof(values-> destField), fmt, val);       \
+            if (len >= (int) sizeof(values-> destField)) {                  \
+                return tooLongError;                                        \
+            }                                                               \
+            /* Now remove the whitespace at the end */                      \
+            while (is_blank(values-> destField[len])) {                     \
+                len--;                                                      \
+            }                                                               \
+            values-> destField[len] = 0;                                    \
+        }                                                                   \
+        else {                                                              \
+            values-> destField[0] = 0;                                      \
+        }                                                                   \
+    } while (0)
+
+// Same as do_put_header, but sourced from params->getConditions
+#define do_get_header(fmt, sourceField, destField, badError, tooLongError)  \
+    do {                                                                    \
+        if (params->getConditions &&                                        \
+            params->getConditions-> sourceField &&                          \
+            params->getConditions-> sourceField[0]) {                       \
+            /* Skip whitespace at beginning of val */                       \
+            const char *val = params->getConditions-> sourceField;          \
+            while (*val && is_blank(*val)) {                                \
+                val++;                                                      \
+            }                                                               \
+            if (!*val) {                                                    \
+                return badError;                                            \
+            }                                                               \
+            /* Compose header, make sure it all fit */                      \
+            int len = snprintf(values-> destField,                          \
+                               sizeof(values-> destField), fmt, val);       \
+            if (len >= (int) sizeof(values-> destField)) {                  \
+                return tooLongError;                                        \
+            }                                                               \
+            /* Now remove the whitespace at the end */                      \
+            while (is_blank(values-> destField[len])) {                     \
+                len--;                                                      \
+            }                                                               \
+            values-> destField[len] = 0;                                    \
+        }                                                                   \
+        else {                                                              \
+            values-> destField[0] = 0;                                      \
+        }                                                                   \
+    } while (0)
+
+    // Host: virtual-host style puts the bucket name in the host name;
+    // path style uses the bare (or default) host name
+    if (params->bucketContext.uriStyle == S3UriStyleVirtualHost) {
+        const char *requestHostName = params->bucketContext.hostName
+                ? params->bucketContext.hostName : defaultHostNameG;
+
+        size_t len = snprintf(values->hostHeader, sizeof(values->hostHeader),
+                              "Host: %s.%s", params->bucketContext.bucketName,
+                              requestHostName);
+        if (len >= sizeof(values->hostHeader)) {
+            return S3StatusUriTooLong;
+        }
+        while (is_blank(values->hostHeader[len])) {
+            len--;
+        }
+        values->hostHeader[len] = 0;
+    }
+    else {
+        size_t len = snprintf(
+                values->hostHeader,
+                sizeof(values->hostHeader),
+                "Host: %s",
+                params->bucketContext.hostName ?
+                    params->bucketContext.hostName : defaultHostNameG);
+        if (len >= sizeof(values->hostHeader)) {
+            return S3StatusUriTooLong;
+        }
+        while (is_blank(values->hostHeader[len])) {
+            len--;
+        }
+        values->hostHeader[len] = 0;
+    }
+
+    // Cache-Control
+    do_put_header("Cache-Control: %s", cacheControl, cacheControlHeader,
+                  S3StatusBadCacheControl, S3StatusCacheControlTooLong);
+
+    // ContentType
+    do_put_header("Content-Type: %s", contentType, contentTypeHeader,
+                  S3StatusBadContentType, S3StatusContentTypeTooLong);
+
+    // MD5
+    do_put_header("Content-MD5: %s", md5, md5Header, S3StatusBadMD5,
+                  S3StatusMD5TooLong);
+
+    // Content-Disposition
+    do_put_header("Content-Disposition: attachment; filename=\"%s\"",
+                  contentDispositionFilename, contentDispositionHeader,
+                  S3StatusBadContentDispositionFilename,
+                  S3StatusContentDispositionFilenameTooLong);
+
+    // ContentEncoding
+    do_put_header("Content-Encoding: %s", contentEncoding,
+                  contentEncodingHeader, S3StatusBadContentEncoding,
+                  S3StatusContentEncodingTooLong);
+
+    // Expires: formatted from the epoch-seconds expires field
+    if (params->putProperties && (params->putProperties->expires >= 0)) {
+        time_t t = (time_t) params->putProperties->expires;
+        struct tm gmt;
+        strftime(values->expiresHeader, sizeof(values->expiresHeader),
+                 "Expires: %a, %d %b %Y %H:%M:%S UTC", gmtime_r(&t, &gmt));
+    }
+    else {
+        values->expiresHeader[0] = 0;
+    }
+
+    // If-Modified-Since
+    if (params->getConditions &&
+        (params->getConditions->ifModifiedSince >= 0)) {
+        time_t t = (time_t) params->getConditions->ifModifiedSince;
+        struct tm gmt;
+        strftime(values->ifModifiedSinceHeader,
+                 sizeof(values->ifModifiedSinceHeader),
+                 "If-Modified-Since: %a, %d %b %Y %H:%M:%S UTC", gmtime_r(&t, &gmt));
+    }
+    else {
+        values->ifModifiedSinceHeader[0] = 0;
+    }
+
+    // If-Unmodified-Since header
+    if (params->getConditions &&
+        (params->getConditions->ifNotModifiedSince >= 0)) {
+        time_t t = (time_t) params->getConditions->ifNotModifiedSince;
+        struct tm gmt;
+        strftime(values->ifUnmodifiedSinceHeader,
+                 sizeof(values->ifUnmodifiedSinceHeader),
+                 "If-Unmodified-Since: %a, %d %b %Y %H:%M:%S UTC", gmtime_r(&t, &gmt));
+    }
+    else {
+        values->ifUnmodifiedSinceHeader[0] = 0;
+    }
+
+    // If-Match header
+    do_get_header("If-Match: %s", ifMatchETag, ifMatchHeader,
+                  S3StatusBadIfMatchETag, S3StatusIfMatchETagTooLong);
+
+    // If-None-Match header
+    do_get_header("If-None-Match: %s", ifNotMatchETag, ifNoneMatchHeader,
+                  S3StatusBadIfNotMatchETag,
+                  S3StatusIfNotMatchETagTooLong);
+
+    // Range header: "first-last" (inclusive) when byteCount given,
+    // open-ended "first-" otherwise
+    if (params->startByte || params->byteCount) {
+        if (params->byteCount) {
+            snprintf(values->rangeHeader, sizeof(values->rangeHeader),
+                     "Range: bytes=%llu-%llu",
+                     (unsigned long long) params->startByte,
+                     (unsigned long long) (params->startByte +
+                                           params->byteCount - 1));
+        }
+        else {
+            snprintf(values->rangeHeader, sizeof(values->rangeHeader),
+                     "Range: bytes=%llu-",
+                     (unsigned long long) params->startByte);
+        }
+    }
+    else {
+        values->rangeHeader[0] = 0;
+    }
+
+    return S3StatusOK;
+}
+
+
+// URL-encodes params->key into values->urlEncodedKey.  Returns
+// S3StatusUriTooLong when the encoded form would exceed S3_MAX_KEY_SIZE.
+static S3Status encode_key(const RequestParams *params,
+                           RequestComputedValues *values)
+{
+    if (urlEncode(values->urlEncodedKey, params->key, S3_MAX_KEY_SIZE, 0)) {
+        return S3StatusOK;
+    }
+    return S3StatusUriTooLong;
+}
+
+
+// Compares two "<key><delim><value>" strings by key.  Returns 1 when the
+// key of s1 sorts strictly before the key of s2 (including when s1's key is
+// a proper prefix of s2's), and 0 otherwise.
+// NOTE(review): equal keys return 0, so kv_gnome_sort will swap
+// equal-keyed neighbors once; confirm duplicate header names cannot occur
+// or that their resulting order is acceptable for signing.
+static int headerle(const char *s1, const char *s2, char delim)
+{
+    for (;; s1++, s2++) {
+        if (*s1 == delim) {
+            // s1's key ended; s1 precedes s2 unless s2's key ends here too
+            return (*s2 != delim);
+        }
+        if (*s2 == delim) {
+            // s2's key is a proper prefix of s1's key
+            return 0;
+        }
+        if (*s1 != *s2) {
+            return (*s1 < *s2);
+        }
+    }
+}
+
+
+// Replace this with merge sort eventually, it's the best stable sort.  But
+// since typically the number of elements being sorted is small, it doesn't
+// matter that much which sort is used, and gnome sort is the world's simplest
+// stable sort.  Added a slight twist to the standard gnome_sort - don't go
+// forward +1, go forward to the last highest index considered.  This saves
+// all the string comparisons that would be done "going forward", and thus
+// only does the necessary string comparisons to move values back into their
+// sorted position.
+// NOTE(review): stability depends on the comparator treating equal keys as
+// "in order", but headerle() returns 0 for equal keys, so neighboring
+// entries with identical keys get swapped once -- confirm duplicate keys
+// cannot occur or that this ordering is acceptable.
+static void kv_gnome_sort(const char **values, int size, char delim)
+{
+    int i = 0, last_highest = 0;
+
+    while (i < size) {
+        // In order relative to the previous element: jump forward to just
+        // past the furthest position already examined
+        if ((i == 0) || headerle(values[i - 1], values[i], delim)) {
+            i = ++last_highest;
+        }
+        // Out of order: swap backward one position
+        else {
+            const char *tmp = values[i];
+            values[i] = values[i - 1];
+            values[--i] = tmp;
+        }
+    }
+}
+
+
+// Canonicalizes the signature headers into the canonicalizedSignatureHeaders buffer
+static void canonicalize_signature_headers(RequestComputedValues *values)
+{
+    // Make a copy of the headers that will be sorted
+    const char *sortedHeaders[S3_MAX_METADATA_COUNT + 3];
+
+    memcpy(sortedHeaders, values->amzHeaders,
+           (values->amzHeadersCount * sizeof(sortedHeaders[0])));
+
+    // add the content-type header and host header
+    int headerCount = values->amzHeadersCount;
+    if (values->contentTypeHeader[0]) {
+        sortedHeaders[headerCount++] = values->contentTypeHeader;
+    }
+    if (values->hostHeader[0]) {
+        sortedHeaders[headerCount++] = values->hostHeader;
+    }
+    if (values->rangeHeader[0]) {
+        sortedHeaders[headerCount++] = values->rangeHeader;
+    }
+    if (values->md5Header[0]) {
+        sortedHeaders[headerCount++] = values->md5Header;
+    }
+
+    // Now sort these
+    kv_gnome_sort(sortedHeaders, headerCount, ':');
+
+    // Now copy this sorted list into the buffer, all the while:
+    // - folding repeated headers into single lines, and
+    // - folding multiple lines
+    // - removing the space after the colon
+    int lastHeaderLen = 0;
+    char *buffer = values->canonicalizedSignatureHeaders;
+    char *hbuf = values->signedHeaders;
+    int i = 0;
+    for (; i < headerCount; i++) {
+        const char *header = sortedHeaders[i];
+        const char *c = header;
+        char v;
+        // If the header names are the same, append the next value
+        if ((i > 0) &&
+            !strncmp(header, sortedHeaders[i - 1], lastHeaderLen)) {
+            // Replacing the previous newline with a comma
+            *(buffer - 1) = ',';
+            // Skip the header name and space
+            c += (lastHeaderLen + 1);
+        }
+        // Else this is a new header
+        else {
+            // Copy in everything up to the space in the ": "
+            while (*c != ' ') {
+                v = tolower(*c++);
+                *buffer++ = v;
+                *hbuf++ = v;
+            }
+            // replace the ":" with a ";"
+            *(hbuf - 1) = ';';
+            // Save the header len since it's a new header
+            lastHeaderLen = c - header;
+            // Skip the space
+            c++;
+        }
+        // Now copy in the value, folding the lines
+        while (*c) {
+            // If c points to a \r\n[whitespace] sequence, then fold
+            // this newline out
+            if ((*c == '\r') && (*(c + 1) == '\n') && is_blank(*(c + 2))) {
+                c += 3;
+                while (is_blank(*c)) {
+                    c++;
+                }
+                // Also, what has most recently been copied into buffer may
+                // have been whitespace, and since we're folding whitespace
+                // out around this newline sequence, back buffer up over
+                // any whitespace it contains
+                while (is_blank(*(buffer - 1))) {
+                    buffer--;
+                }
+                continue;
+            }
+            *buffer++ = *c++;
+        }
+        // Finally, add the newline
+        *buffer++ = '\n';
+    }
+    // Remove the extra trailing semicolon from the header name list
+    // and terminate the string.
+    *(hbuf - 1) = '\0';
+
+    // Terminate the buffer
+    *buffer = 0;
+}
+
+
+// Canonicalizes the resource into params->canonicalizedResource
+static void canonicalize_resource(const S3BucketContext *context,
+                                  const char *urlEncodedKey,
+                                  char *buffer)
+{
+    int len = 0;
+
+    *buffer = 0;
+
+#define append(str) len += sprintf(&(buffer[len]), "%s", str)
+
+    if (context->uriStyle == S3UriStylePath) {
+        if (context->bucketName && context->bucketName[0]) {
+            buffer[len++] = '/';
+            append(context->bucketName);
+        }
+    }
+
+    append("/");
+
+    if (urlEncodedKey && urlEncodedKey[0]) {
+        append(urlEncodedKey);
+    }
+
+#undef append
+}
+
+
// Splits an already-URL-encoded query string on '&', sorts the
// parameters by key (the part before '='), and writes them back,
// '&'-joined, into `result`.  The caller must size `result` large enough
// and pre-terminate it (callers pass a zeroed buffer of ample size).
static void sort_query_string(const char *queryString, char *result)
{
#ifdef SIGNATURE_DEBUG
    printf("\n--\nsort_and_urlencode\nqueryString: %s\n", queryString);
#endif

    // Upper bound on the parameter count: one more than the number of
    // '&' separators.  strtok_r below may yield fewer tokens than this
    // when the string contains empty fields ("a=1&&b=2", trailing '&').
    unsigned int maxParams = 1;
    const char *tmp = queryString;
    while ((tmp = strchr(tmp, '&')) != NULL) {
        maxParams++;
        tmp++;
    }

    const char *params[maxParams];

    // Mutable copy for strtok_r
    char tokenized[strlen(queryString) + 1];
    strncpy(tokenized, queryString, strlen(queryString) + 1);

    char *tok = tokenized;
    const char *token = NULL;
    char *save = NULL;
    unsigned int numParams = 0;

    while ((token = strtok_r(tok, "&", &save)) != NULL) {
        tok = NULL;
        params[numParams++] = token;
    }

    // Sort only the tokens actually parsed.  The previous code sorted
    // and emitted the separator-derived count, which read uninitialized
    // pointers whenever strtok_r skipped empty fields.
    kv_gnome_sort(params, numParams, '=');

#ifdef SIGNATURE_DEBUG
    unsigned int di;
    for (di = 0; di < numParams; di++) {
        printf("%d: %s\n", di, params[di]);
    }
#endif

    unsigned int pi;
    for (pi = 0; pi < numParams; pi++) {
        // All params are urlEncoded already
        strcat(result, params[pi]);
        strcat(result, "&");
    }
    // Drop the trailing '&'.  Guard the empty case so we never write to
    // result[-1] (possible when queryString consists only of '&'s).
    size_t resultLen = strlen(result);
    if (resultLen > 0) {
        result[resultLen - 1] = '\0';
    }
}
+
+
// Builds the canonical query string for signing: the sorted query
// parameters, followed by the subresource (e.g. "acl", "uploads").  A
// valueless subresource gets a trailing '=' appended, as the canonical
// form requires "key=" even when there is no value.
static void canonicalize_query_string(const char *queryParams,
                                      const char *subResource, char *buffer)
{
    int pos = 0;
    int haveParams = (queryParams && queryParams[0]);
    int haveSubResource = (subResource && subResource[0]);

    buffer[0] = 0;

#define add_str(str) pos += sprintf(&(buffer[pos]), "%s", str)

    if (haveParams) {
        // Parameters must appear in sorted order in the canonical form
        char sorted[strlen(queryParams) * 2];
        sorted[0] = '\0';
        sort_query_string(queryParams, sorted);
        add_str(sorted);
    }

    if (haveSubResource) {
        if (haveParams) {
            add_str("&");
        }
        add_str(subResource);
        if (!strchr(subResource, '=')) {
            add_str("=");
        }
    }

#undef add_str
}
+
+
+static HttpRequestType http_request_method_to_type(const char *method)
+{
+    if (!method) {
+        return HttpRequestTypeInvalid;
+    }
+    if (strcmp(method, "POST") == 0) {
+        return HttpRequestTypePOST;
+    }
+    else if (strcmp(method, "GET") == 0) {
+        return HttpRequestTypeGET;
+    }
+    else if (strcmp(method, "HEAD") == 0) {
+        return HttpRequestTypeHEAD;
+    }
+    else if (strcmp(method, "PUT") == 0) {
+        return HttpRequestTypePUT;
+    }
+    else if (strcmp(method, "COPY") == 0) {
+        return HttpRequestTypeCOPY;
+    }
+    else if (strcmp(method, "DELETE") == 0) {
+        return HttpRequestTypeDELETE;
+    }
+    return HttpRequestTypeInvalid;
+}
+
+
+// Convert an HttpRequestType to an HTTP Verb string
+static const char *http_request_type_to_verb(HttpRequestType requestType)
+{
+    switch (requestType) {
+    case HttpRequestTypePOST:
+        return "POST";
+    case HttpRequestTypeGET:
+        return "GET";
+    case HttpRequestTypeHEAD:
+        return "HEAD";
+    case HttpRequestTypePUT:
+    case HttpRequestTypeCOPY:
+        return "PUT";
+    default: // HttpRequestTypeDELETE
+        return "DELETE";
+    }
+}
+
+
// Composes the AWS Signature Version 4 Authorization header for the
// request.  Follows the SigV4 recipe:
//   1. Build the canonical request (verb, canonical URI, canonical query
//      string, canonical headers, signed-header list, payload hash) and
//      SHA-256 it.
//   2. Build the string-to-sign from the timestamp, credential scope,
//      and that hash.
//   3. Derive the signing key by chained HMAC-SHA256 over the date,
//      region, service ("s3"), and "aws4_request".
//   4. HMAC the string-to-sign with the signing key.
// Results land in values->requestSignatureHex, values->authCredential
// and values->authorizationHeader.  Requires that canonicalURI,
// canonicalQueryString, canonicalizedSignatureHeaders, signedHeaders,
// payloadHash and requestDateISO8601 have already been computed.
// Always returns S3StatusOK.
static S3Status compose_auth_header(const RequestParams *params,
                                    RequestComputedValues *values)
{
    const char *httpMethod = http_request_type_to_verb(params->httpRequestType);
    // Exact size of the canonical request: each component plus a '\n',
    // then the hex payload hash and the terminating NUL
    int canonicalRequestLen = strlen(httpMethod) + 1 +
    strlen(values->canonicalURI) + 1 +
    strlen(values->canonicalQueryString) + 1 +
    strlen(values->canonicalizedSignatureHeaders) + 1 +
    strlen(values->signedHeaders) + 1 +
    2 * S3_SHA256_DIGEST_LENGTH + 1; // 2 hex digits for each byte

    int len = 0;

    char canonicalRequest[canonicalRequestLen];

    // Note: sizeof(buf) is valid here even for the VLA above; for the
    // fixed-size arrays used later it yields the array size as intended
#define buf_append(buf, format, ...)                    \
    len += snprintf(&(buf[len]), sizeof(buf) - len,     \
                    format, __VA_ARGS__)

    canonicalRequest[0] = '\0';
    buf_append(canonicalRequest, "%s\n", httpMethod);
    buf_append(canonicalRequest, "%s\n", values->canonicalURI);
    buf_append(canonicalRequest, "%s\n", values->canonicalQueryString);
    buf_append(canonicalRequest, "%s\n", values->canonicalizedSignatureHeaders);
    buf_append(canonicalRequest, "%s\n", values->signedHeaders);

    buf_append(canonicalRequest, "%s", values->payloadHash);

#ifdef SIGNATURE_DEBUG
    printf("--\nCanonical Request:\n%s\n", canonicalRequest);
#endif

    // Hash the canonical request and hex-encode the digest
    len = 0;
    unsigned char canonicalRequestHash[S3_SHA256_DIGEST_LENGTH];
#ifdef __APPLE__
    CC_SHA256(canonicalRequest, strlen(canonicalRequest), canonicalRequestHash);
#else
    const unsigned char *rqstData = (const unsigned char*) canonicalRequest;
    SHA256(rqstData, strlen(canonicalRequest), canonicalRequestHash);
#endif
    char canonicalRequestHashHex[2 * S3_SHA256_DIGEST_LENGTH + 1];
    canonicalRequestHashHex[0] = '\0';
    int i = 0;
    for (; i < S3_SHA256_DIGEST_LENGTH; i++) {
        buf_append(canonicalRequestHashHex, "%02x", canonicalRequestHash[i]);
    }

    // Credential scope: "<YYYYMMDD>/<region>/s3/aws4_request".  The
    // "%.8s" takes just the date part of the ISO8601 timestamp.
    const char *awsRegion = S3_DEFAULT_REGION;
    if (params->bucketContext.authRegion) {
        awsRegion = params->bucketContext.authRegion;
    }
    char scope[SIGNATURE_SCOPE_SIZE + 1];
    snprintf(scope, sizeof(scope), "%.8s/%s/s3/aws4_request",
             values->requestDateISO8601, awsRegion);

    char stringToSign[17 + 17 + SIGNATURE_SCOPE_SIZE + 1
        + strlen(canonicalRequestHashHex)];
    snprintf(stringToSign, sizeof(stringToSign), "AWS4-HMAC-SHA256\n%s\n%s\n%s",
             values->requestDateISO8601, scope, canonicalRequestHashHex);

#ifdef SIGNATURE_DEBUG
    printf("--\nString to Sign:\n%s\n", stringToSign);
#endif

    // Signing key derivation starts from "AWS4" + secret key
    const char *secretAccessKey = params->bucketContext.secretAccessKey;
    char accessKey[strlen(secretAccessKey) + 5];
    snprintf(accessKey, sizeof(accessKey), "AWS4%s", secretAccessKey);

    // kDate = HMAC(kSecret, date); kRegion = HMAC(kDate, region);
    // kService = HMAC(kRegion, "s3"); kSigning = HMAC(kService,
    // "aws4_request"); signature = HMAC(kSigning, stringToSign).
    // CommonCrypto on macOS, OpenSSL elsewhere.
#ifdef __APPLE__
    unsigned char dateKey[S3_SHA256_DIGEST_LENGTH];
    CCHmac(kCCHmacAlgSHA256, accessKey, strlen(accessKey),
           values->requestDateISO8601, 8, dateKey);
    unsigned char dateRegionKey[S3_SHA256_DIGEST_LENGTH];
    CCHmac(kCCHmacAlgSHA256, dateKey, S3_SHA256_DIGEST_LENGTH, awsRegion,
           strlen(awsRegion), dateRegionKey);
    unsigned char dateRegionServiceKey[S3_SHA256_DIGEST_LENGTH];
    CCHmac(kCCHmacAlgSHA256, dateRegionKey, S3_SHA256_DIGEST_LENGTH, "s3", 2,
           dateRegionServiceKey);
    unsigned char signingKey[S3_SHA256_DIGEST_LENGTH];
    CCHmac(kCCHmacAlgSHA256, dateRegionServiceKey, S3_SHA256_DIGEST_LENGTH,
           "aws4_request", strlen("aws4_request"), signingKey);

    unsigned char finalSignature[S3_SHA256_DIGEST_LENGTH];
    CCHmac(kCCHmacAlgSHA256, signingKey, S3_SHA256_DIGEST_LENGTH, stringToSign,
            strlen(stringToSign), finalSignature);
#else
    const EVP_MD *sha256evp = EVP_sha256();
    unsigned char dateKey[S3_SHA256_DIGEST_LENGTH];
    HMAC(sha256evp, accessKey, strlen(accessKey),
         (const unsigned char*) values->requestDateISO8601, 8, dateKey,
         NULL);
    unsigned char dateRegionKey[S3_SHA256_DIGEST_LENGTH];
    HMAC(sha256evp, dateKey, S3_SHA256_DIGEST_LENGTH,
         (const unsigned char*) awsRegion, strlen(awsRegion), dateRegionKey,
         NULL);
    unsigned char dateRegionServiceKey[S3_SHA256_DIGEST_LENGTH];
    HMAC(sha256evp, dateRegionKey, S3_SHA256_DIGEST_LENGTH,
         (const unsigned char*) "s3", 2, dateRegionServiceKey, NULL);
    unsigned char signingKey[S3_SHA256_DIGEST_LENGTH];
    HMAC(sha256evp, dateRegionServiceKey, S3_SHA256_DIGEST_LENGTH,
         (const unsigned char*) "aws4_request", strlen("aws4_request"),
         signingKey,
         NULL);

    unsigned char finalSignature[S3_SHA256_DIGEST_LENGTH];
    HMAC(sha256evp, signingKey, S3_SHA256_DIGEST_LENGTH,
         (const unsigned char*) stringToSign, strlen(stringToSign),
         finalSignature, NULL);
#endif

    // Hex-encode the signature
    len = 0;
    values->requestSignatureHex[0] = '\0';
    for (i = 0; i < S3_SHA256_DIGEST_LENGTH; i++) {
        buf_append(values->requestSignatureHex, "%02x", finalSignature[i]);
    }

    snprintf(values->authCredential, sizeof(values->authCredential),
             "%s/%.8s/%s/s3/aws4_request", params->bucketContext.accessKeyId,
             values->requestDateISO8601, awsRegion);

    snprintf(
            values->authorizationHeader,
            sizeof(values->authorizationHeader),
            "Authorization: AWS4-HMAC-SHA256 Credential=%s,SignedHeaders=%s,Signature=%s",
            values->authCredential, values->signedHeaders,
            values->requestSignatureHex);

#ifdef SIGNATURE_DEBUG
    printf("--\nAuthorization Header:\n%s\n", values->authorizationHeader);
#endif

    return S3StatusOK;

#undef buf_append

}
+
+
+// Compose the URI to use for the request given the request parameters
+static S3Status compose_uri(char *buffer, int bufferSize,
+                            const S3BucketContext *bucketContext,
+                            const char *urlEncodedKey,
+                            const char *subResource, const char *queryParams)
+{
+    int len = 0;
+
+#define uri_append(fmt, ...)                                                 \
+    do {                                                                     \
+        len += snprintf(&(buffer[len]), bufferSize - len, fmt, __VA_ARGS__); \
+        if (len >= bufferSize) {                                             \
+            return S3StatusUriTooLong;                                       \
+        }                                                                    \
+    } while (0)
+
+    uri_append("http%s://",
+               (bucketContext->protocol == S3ProtocolHTTP) ? "" : "s");
+
+    const char *hostName =
+        bucketContext->hostName ? bucketContext->hostName : defaultHostNameG;
+
+    if (bucketContext->bucketName &&
+        bucketContext->bucketName[0]) {
+        if (bucketContext->uriStyle == S3UriStyleVirtualHost) {
+            if (strchr(bucketContext->bucketName, '.') == NULL) {
+                uri_append("%s.%s", bucketContext->bucketName, hostName);
+            }
+            else {
+                // We'll use the hostName in the URL, and then explicitly set
+                // the Host header to match bucket.host so that host validation
+                // works.
+                uri_append("%s", hostName);
+            }
+        }
+        else {
+            uri_append("%s/%s", hostName, bucketContext->bucketName);
+        }
+    }
+    else {
+        uri_append("%s", hostName);
+    }
+
+    uri_append("%s", "/");
+
+    uri_append("%s", urlEncodedKey);
+
+    if (subResource && subResource[0]) {
+        uri_append("?%s", subResource);
+    }
+
+    if (queryParams) {
+        uri_append("%s%s", (subResource && subResource[0]) ? "&" : "?",
+                   queryParams);
+    }
+
+    return S3StatusOK;
+}
+
// Sets up the curl handle given the completely computed RequestParams:
// installs the header/read/write callbacks, the transfer-policy options,
// all request headers (standard, Authorization, and x-amz-*), the URL,
// and the HTTP method.  On any curl_easy_setopt failure it returns
// S3StatusFailedToInitializeRequest immediately; note that
// request->headers may already have been allocated by then, so the
// caller is responsible for freeing it on error.
static S3Status setup_curl(Request *request,
                           const RequestParams *params,
                           const RequestComputedValues *values)
{
    CURLcode status;

#define curl_easy_setopt_safe(opt, val)                                 \
    if ((status = curl_easy_setopt                                      \
         (request->curl, opt, val)) != CURLE_OK) {                      \
        return S3StatusFailedToInitializeRequest;                       \
    }

    // Debugging only
    // curl_easy_setopt_safe(CURLOPT_VERBOSE, 1);

    // Set private data to request for the benefit of S3RequestContext
    curl_easy_setopt_safe(CURLOPT_PRIVATE, request);

    // Set header callback and data
    curl_easy_setopt_safe(CURLOPT_HEADERDATA, request);
    curl_easy_setopt_safe(CURLOPT_HEADERFUNCTION, &curl_header_func);

    // Set read callback, data, and readSize
    curl_easy_setopt_safe(CURLOPT_READFUNCTION, &curl_read_func);
    curl_easy_setopt_safe(CURLOPT_READDATA, request);

    // Set write callback and data
    curl_easy_setopt_safe(CURLOPT_WRITEFUNCTION, &curl_write_func);
    curl_easy_setopt_safe(CURLOPT_WRITEDATA, request);

    // Ask curl to parse the Last-Modified header.  This is easier than
    // parsing it ourselves.
    curl_easy_setopt_safe(CURLOPT_FILETIME, 1);

    // Curl docs suggest that this is necessary for multithreaded code.
    // However, it also points out that DNS timeouts will not be honored
    // during DNS lookup, which can be worked around by using the c-ares
    // library, which we do not do yet.
    curl_easy_setopt_safe(CURLOPT_NOSIGNAL, 1);

    // Turn off Curl's built-in progress meter
    curl_easy_setopt_safe(CURLOPT_NOPROGRESS, 1);

    // xxx todo - support setting the proxy for Curl to use (can't use https
    // for proxies though)

    // xxx todo - support setting the network interface for Curl to use

    // I think this is useful - we don't need interactive performance, we need
    // to complete large operations quickly
    curl_easy_setopt_safe(CURLOPT_TCP_NODELAY, 1);

    // Don't use Curl's 'netrc' feature
    curl_easy_setopt_safe(CURLOPT_NETRC, CURL_NETRC_IGNORED);

    // Don't verify S3's certificate unless S3_INIT_VERIFY_PEER is set.
    // The request_context may be set to override this
    curl_easy_setopt_safe(CURLOPT_SSL_VERIFYPEER, verifyPeer);

    // Follow any redirection directives that S3 sends
    curl_easy_setopt_safe(CURLOPT_FOLLOWLOCATION, 1);

    // A safety valve in case S3 goes bananas with redirects
    curl_easy_setopt_safe(CURLOPT_MAXREDIRS, 10);

    // Set the User-Agent; maybe Amazon will track these?
    curl_easy_setopt_safe(CURLOPT_USERAGENT, userAgentG);

    // Set the low speed limit and time; we abort transfers that stay at
    // less than 1K per second for more than 15 seconds.
    // xxx todo - make these configurable
    // xxx todo - allow configurable max send and receive speed
    curl_easy_setopt_safe(CURLOPT_LOW_SPEED_LIMIT, 1024);
    curl_easy_setopt_safe(CURLOPT_LOW_SPEED_TIME, 15);


    if (params->timeoutMs > 0) {
        curl_easy_setopt_safe(CURLOPT_TIMEOUT_MS, params->timeoutMs);
    }


    // Append standard headers
#define append_standard_header(fieldName)                               \
    if (values-> fieldName [0]) {                                       \
        request->headers = curl_slist_append(request->headers,          \
                                             values-> fieldName);       \
    }

    // Would use CURLOPT_INFILESIZE_LARGE, but it is buggy in libcurl
    if ((params->httpRequestType == HttpRequestTypePUT) ||
        (params->httpRequestType == HttpRequestTypePOST)) {
        char header[256];
        snprintf(header, sizeof(header), "Content-Length: %llu",
                 (unsigned long long) params->toS3CallbackTotalSize);
        request->headers = curl_slist_append(request->headers, header);
        // "Transfer-Encoding:" with no value suppresses the header
        request->headers = curl_slist_append(request->headers,
                                             "Transfer-Encoding:");
    }
    else if (params->httpRequestType == HttpRequestTypeCOPY) {
        request->headers = curl_slist_append(request->headers,
                                             "Transfer-Encoding:");
    }

    append_standard_header(hostHeader);
    append_standard_header(cacheControlHeader);
    append_standard_header(contentTypeHeader);
    append_standard_header(md5Header);
    append_standard_header(contentDispositionHeader);
    append_standard_header(contentEncodingHeader);
    append_standard_header(expiresHeader);
    append_standard_header(ifModifiedSinceHeader);
    append_standard_header(ifUnmodifiedSinceHeader);
    append_standard_header(ifMatchHeader);
    append_standard_header(ifNoneMatchHeader);
    append_standard_header(rangeHeader);
    append_standard_header(authorizationHeader);

    // Append x-amz- headers
    int i;
    for (i = 0; i < values->amzHeadersCount; i++) {
        request->headers =
            curl_slist_append(request->headers, values->amzHeaders[i]);
    }

    // Set the HTTP headers
    curl_easy_setopt_safe(CURLOPT_HTTPHEADER, request->headers);

    // Set URI
    curl_easy_setopt_safe(CURLOPT_URL, request->uri);

    // Set request type.  COPY is a PUT with no body; the copy source is
    // conveyed via an x-amz header.
    switch (params->httpRequestType) {
    case HttpRequestTypeHEAD:
        curl_easy_setopt_safe(CURLOPT_NOBODY, 1);
        break;
    case HttpRequestTypePOST:
        curl_easy_setopt_safe(CURLOPT_CUSTOMREQUEST, "POST");
        curl_easy_setopt_safe(CURLOPT_UPLOAD, 1);
        break;

    case HttpRequestTypePUT:
    case HttpRequestTypeCOPY:
        curl_easy_setopt_safe(CURLOPT_UPLOAD, 1);
        break;
    case HttpRequestTypeDELETE:
        curl_easy_setopt_safe(CURLOPT_CUSTOMREQUEST, "DELETE");
        break;
    default: // HttpRequestTypeGET
        break;
    }

    return S3StatusOK;
}
+
+
+static void request_deinitialize(Request *request)
+{
+    if (request->headers) {
+        curl_slist_free_all(request->headers);
+    }
+
+    error_parser_deinitialize(&(request->errorParser));
+
+    // curl_easy_reset prevents connections from being re-used for some
+    // reason.  This makes HTTP Keep-Alive meaningless and is very bad for
+    // performance.  But it is necessary to allow curl to work properly.
+    // xxx todo figure out why
+    curl_easy_reset(request->curl);
+}
+
+
+static S3Status request_get(const RequestParams *params,
+                            const RequestComputedValues *values,
+                            Request **reqReturn)
+{
+    Request *request = 0;
+
+    // Try to get one from the request stack.  We hold the lock for the
+    // shortest time possible here.
+    pthread_mutex_lock(&requestStackMutexG);
+
+    if (requestStackCountG) {
+        request = requestStackG[--requestStackCountG];
+    }
+
+    pthread_mutex_unlock(&requestStackMutexG);
+
+    // If we got one, deinitialize it for re-use
+    if (request) {
+        request_deinitialize(request);
+    }
+    // Else there wasn't one available in the request stack, so create one
+    else {
+        if (!(request = (Request *) malloc(sizeof(Request)))) {
+            return S3StatusOutOfMemory;
+        }
+        if (!(request->curl = curl_easy_init())) {
+            free(request);
+            return S3StatusFailedToInitializeRequest;
+        }
+    }
+
+    // Initialize the request
+    request->prev = 0;
+    request->next = 0;
+
+    // Request status is initialized to no error, will be updated whenever
+    // an error occurs
+    request->status = S3StatusOK;
+
+    S3Status status;
+
+    // Start out with no headers
+    request->headers = 0;
+
+    // Compute the URL
+    if ((status = compose_uri
+         (request->uri, sizeof(request->uri),
+          &(params->bucketContext), values->urlEncodedKey,
+          params->subResource, params->queryParams)) != S3StatusOK) {
+        curl_easy_cleanup(request->curl);
+        free(request);
+        return status;
+    }
+
+    // Set all of the curl handle options
+    if ((status = setup_curl(request, params, values)) != S3StatusOK) {
+        curl_easy_cleanup(request->curl);
+        free(request);
+        return status;
+    }
+
+    request->propertiesCallback = params->propertiesCallback;
+
+    request->toS3Callback = params->toS3Callback;
+
+    request->toS3CallbackBytesRemaining = params->toS3CallbackTotalSize;
+
+    request->fromS3Callback = params->fromS3Callback;
+
+    request->completeCallback = params->completeCallback;
+
+    request->callbackData = params->callbackData;
+
+    response_headers_handler_initialize(&(request->responseHeadersHandler));
+
+    request->propertiesCallbackMade = 0;
+
+    error_parser_initialize(&(request->errorParser));
+
+    *reqReturn = request;
+
+    return S3StatusOK;
+}
+
+
// Completely destroys a Request: frees its header list and error parser,
// destroys its curl handle, and releases the Request itself.  Used when
// a Request cannot be returned to the re-use stack.
static void request_destroy(Request *request)
{
    request_deinitialize(request);
    curl_easy_cleanup(request->curl);
    free(request);
}
+
+
+static void request_release(Request *request)
+{
+    pthread_mutex_lock(&requestStackMutexG);
+
+    // If the request stack is full, destroy this one
+    if (requestStackCountG == REQUEST_STACK_SIZE) {
+        pthread_mutex_unlock(&requestStackMutexG);
+        request_destroy(request);
+    }
+    // Else put this one at the front of the request stack; we do this because
+    // we want the most-recently-used curl handle to be re-used on the next
+    // request, to maximize our chances of re-using a TCP connection before it
+    // times out
+    else {
+        requestStackG[requestStackCountG++] = request;
+        pthread_mutex_unlock(&requestStackMutexG);
+    }
+}
+
+
+S3Status request_api_initialize(const char *userAgentInfo, int flags,
+                                const char *defaultHostName)
+{
+    if (curl_global_init(CURL_GLOBAL_ALL &
+                         ~((flags & S3_INIT_WINSOCK) ? 0 : CURL_GLOBAL_WIN32))
+        != CURLE_OK) {
+        return S3StatusInternalError;
+    }
+    verifyPeer = (flags & S3_INIT_VERIFY_PEER) != 0;
+
+    if (!defaultHostName) {
+        defaultHostName = S3_DEFAULT_HOSTNAME;
+    }
+
+    if (snprintf(defaultHostNameG, S3_MAX_HOSTNAME_SIZE,
+                 "%s", defaultHostName) >= S3_MAX_HOSTNAME_SIZE) {
+        return S3StatusUriTooLong;
+    }
+
+    pthread_mutex_init(&requestStackMutexG, 0);
+
+    requestStackCountG = 0;
+
+    if (!userAgentInfo || !*userAgentInfo) {
+        userAgentInfo = "Unknown";
+    }
+
+    char platform[96];
+    struct utsname utsn;
+    if (uname(&utsn)) {
+        snprintf(platform, sizeof(platform), "Unknown");
+    }
+    else {
+        snprintf(platform, sizeof(platform), "%s%s%s", utsn.sysname,
+                 utsn.machine[0] ? " " : "", utsn.machine);
+    }
+
+    snprintf(userAgentG, sizeof(userAgentG),
+             "Mozilla/4.0 (Compatible; %s; libs3 %s.%s; %s)",
+             userAgentInfo, LIBS3_VER_MAJOR, LIBS3_VER_MINOR, platform);
+
+    xmlInitParser();
+    return S3StatusOK;
+}
+
+
+void request_api_deinitialize()
+{
+    pthread_mutex_destroy(&requestStackMutexG);
+
+    xmlCleanupParser();
+    while (requestStackCountG--) {
+        request_destroy(requestStackG[requestStackCountG]);
+    }
+}
+
// Computes everything needed to issue a request: validates the bucket
// name, timestamps the request (ISO8601 UTC), composes the amz and
// standard headers, URL-encodes the key, canonicalizes the headers,
// resource and query string, and finally composes the SigV4
// Authorization header into `computed`.  The steps are order-dependent:
// the canonical forms require the headers/key, and the auth header
// requires all canonical forms.  Returns the first failing step's
// status, or S3StatusOK.
static S3Status setup_request(const RequestParams *params,
                              RequestComputedValues *computed,
                              int forceUnsignedPayload)
{
    S3Status status;

    // Validate the bucket name
    if (params->bucketContext.bucketName
        && ((status = S3_validate_bucket_name(params->bucketContext.bucketName,
                                              params->bucketContext.uriStyle))
            != S3StatusOK)) {
        return status;
    }

    // Timestamp the request in the "YYYYMMDDTHHMMSSZ" form SigV4 expects
    time_t now = time(NULL);
    struct tm gmt;
    gmtime_r(&now, &gmt);
    strftime(computed->requestDateISO8601, sizeof(computed->requestDateISO8601),
             "%Y%m%dT%H%M%SZ", &gmt);

    // Compose the amz headers
    if ((status = compose_amz_headers(params, forceUnsignedPayload, computed))
        != S3StatusOK) {
        return status;
    }

    // Compose standard headers
    if ((status = compose_standard_headers(params, computed)) != S3StatusOK) {
        return status;
    }

    // URL encode the key
    if ((status = encode_key(params, computed)) != S3StatusOK) {
        return status;
    }

    // Compute the canonicalized amz headers
    canonicalize_signature_headers(computed);

    // Compute the canonicalized resource
    canonicalize_resource(&params->bucketContext, computed->urlEncodedKey,
                          computed->canonicalURI);
    canonicalize_query_string(params->queryParams, params->subResource,
                              computed->canonicalQueryString);

    // Compose Authorization header
    if ((status = compose_auth_header(params, computed)) != S3StatusOK) {
        return status;
    }

#ifdef SIGNATURE_DEBUG
    int i = 0;
    printf("\n--\nAMZ Headers:\n");
    for (; i < computed->amzHeadersCount; i++) {
        printf("%s\n", computed->amzHeaders[i]);
    }
#endif

    return status;
}
+
+void request_perform(const RequestParams *params, S3RequestContext *context)
+{
+    Request *request;
+    S3Status status;
+    int verifyPeerRequest = verifyPeer;
+    CURLcode curlstatus;
+
+#define return_status(status)                                           \
+    (*(params->completeCallback))(status, 0, params->callbackData);     \
+    return
+
+    // These will hold the computed values
+    RequestComputedValues computed;
+
+    if ((status = setup_request(params, &computed, 0)) != S3StatusOK) {
+        return_status(status);
+    }
+
+    // Get an initialized Request structure now
+    if ((status = request_get(params, &computed, &request)) != S3StatusOK) {
+        return_status(status);
+    }
+    if (context && context->verifyPeerSet) {
+        verifyPeerRequest = context->verifyPeerSet;
+    }
+    // Allow per-context override of verifyPeer
+    if (verifyPeerRequest != verifyPeer) {
+        if ((curlstatus = curl_easy_setopt(request->curl,
+                                           CURLOPT_SSL_VERIFYPEER,
+                                           context->verifyPeer))
+            != CURLE_OK) {
+            return_status(S3StatusFailedToInitializeRequest);
+        }
+    }
+
+    // If a RequestContext was provided, add the request to the curl multi
+    if (context) {
+        CURLMcode code = curl_multi_add_handle(context->curlm, request->curl);
+        if (code == CURLM_OK) {
+            if (context->requests) {
+                request->prev = context->requests->prev;
+                request->next = context->requests;
+                context->requests->prev->next = request;
+                context->requests->prev = request;
+            }
+            else {
+                context->requests = request->next = request->prev = request;
+            }
+        }
+        else {
+            if (request->status == S3StatusOK) {
+                request->status = (code == CURLM_OUT_OF_MEMORY) ?
+                    S3StatusOutOfMemory : S3StatusInternalError;
+            }
+            request_finish(request);
+        }
+    }
+    // Else, perform the request immediately
+    else {
+        CURLcode code = curl_easy_perform(request->curl);
+        if ((code != CURLE_OK) && (request->status == S3StatusOK)) {
+            request->status = request_curl_code_to_status(code);
+        }
+
+        // Finish the request, ensuring that all callbacks have been made, and
+        // also releases the request
+        request_finish(request);
+    }
+}
+
+
// Completes a request after its transfer has ended (successfully or
// not): flushes the response headers, resolves the final S3Status —
// preferring any parsed S3 error XML, then falling back to mapping the
// raw HTTP response code — invokes the completion callback, and
// releases the request back to the re-use stack.
void request_finish(Request *request)
{
    // If we haven't detected this already, we now know that the headers are
    // definitely done being read in
    request_headers_done(request);

    // If there was no error processing the request, then possibly there was
    // an S3 error parsed, which should be converted into the request status
    if (request->status == S3StatusOK) {
        error_parser_convert_status(&(request->errorParser),
                                    &(request->status));
        // If there still was no error recorded, then it is possible that
        // there was in fact an error but that there was no error XML
        // detailing the error
        if ((request->status == S3StatusOK) &&
            ((request->httpResponseCode < 200) ||
             (request->httpResponseCode > 299))) {
            switch (request->httpResponseCode) {
            case 0:
                // This happens if the request never got any HTTP response
                // headers at all, we call this a ConnectionFailed error
                request->status = S3StatusConnectionFailed;
                break;
            case 100: // Some versions of libcurl erroneously set HTTP
                      // status to this
                break;
            case 301:
                request->status = S3StatusErrorPermanentRedirect;
                break;
            case 307:
                request->status = S3StatusHttpErrorMovedTemporarily;
                break;
            case 400:
                request->status = S3StatusHttpErrorBadRequest;
                break;
            case 403:
                request->status = S3StatusHttpErrorForbidden;
                break;
            case 404:
                request->status = S3StatusHttpErrorNotFound;
                break;
            case 405:
                request->status = S3StatusErrorMethodNotAllowed;
                break;
            case 409:
                request->status = S3StatusHttpErrorConflict;
                break;
            case 411:
                request->status = S3StatusErrorMissingContentLength;
                break;
            case 412:
                request->status = S3StatusErrorPreconditionFailed;
                break;
            case 416:
                request->status = S3StatusErrorInvalidRange;
                break;
            case 500:
                request->status = S3StatusErrorInternalError;
                break;
            case 501:
                request->status = S3StatusErrorNotImplemented;
                break;
            case 503:
                request->status = S3StatusErrorSlowDown;
                break;
            default:
                request->status = S3StatusHttpErrorUnknown;
                break;
            }
        }
    }

    // Report the final status (and any parsed S3 error details) to the
    // caller, then recycle the request
    (*(request->completeCallback))
        (request->status, &(request->errorParser.s3ErrorDetails),
         request->callbackData);

    request_release(request);
}
+
+
+// Maps a libcurl easy-interface result code onto the closest S3Status.
+// Codes without a specific mapping collapse to S3StatusInternalError.
+S3Status request_curl_code_to_status(CURLcode code)
+{
+    switch (code) {
+    case CURLE_OUT_OF_MEMORY:
+        return S3StatusOutOfMemory;
+    case CURLE_COULDNT_RESOLVE_PROXY:
+    case CURLE_COULDNT_RESOLVE_HOST:
+        return S3StatusNameLookupError;
+    case CURLE_COULDNT_CONNECT:
+        return S3StatusFailedToConnect;
+    case CURLE_WRITE_ERROR:
+    case CURLE_OPERATION_TIMEDOUT:
+        return S3StatusErrorRequestTimeout;
+    case CURLE_PARTIAL_FILE:
+        // NOTE(review): a short/partial transfer is deliberately treated as
+        // success here -- presumably the received data is still considered
+        // usable; confirm this is intended
+        return S3StatusOK;
+#if LIBCURL_VERSION_NUM >= 0x071101 /* 7.17.1 */
+    case CURLE_PEER_FAILED_VERIFICATION:
+#else
+    case CURLE_SSL_PEER_CERTIFICATE:
+#endif
+    case CURLE_SSL_CACERT:
+        return S3StatusServerFailedVerification;
+    default:
+        return S3StatusInternalError;
+    }
+}
+
+
+// Generates a pre-signed (AWS Signature Version 4) query string for the
+// given bucket/key/resource into 'buffer', which the caller must size as
+// at least S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes.  'expires' is the
+// URL lifetime in seconds: negative values request the maximum, and larger
+// values are clamped to it.
+S3Status S3_generate_authenticated_query_string
+    (char *buffer, const S3BucketContext *bucketContext,
+     const char *key, int expires, const char *resource,
+     const char *httpMethod)
+{
+    // maximum expiration period is seven days (in seconds)
+#define MAX_EXPIRES 604800
+
+    if (expires < 0) {
+        expires = MAX_EXPIRES;
+    }
+    else if (expires > MAX_EXPIRES) {
+        expires = MAX_EXPIRES;
+    }
+
+    // NOTE(review): this positional initializer must track the declared
+    // field order of RequestParams exactly -- verify against request.h if
+    // that struct ever changes
+    RequestParams params =
+    { http_request_method_to_type(httpMethod), *bucketContext, key, NULL,
+        resource,
+        NULL, NULL, NULL, 0, 0, NULL, NULL, NULL, 0, NULL, NULL, NULL, 0};
+
+    // Compute date/credential/signed-headers/signature without performing
+    // the request (presumably the trailing 1 selects query-string signing
+    // mode -- confirm against setup_request)
+    RequestComputedValues computed;
+    S3Status status = setup_request(&params, &computed, 1);
+    if (status != S3StatusOK) {
+        return status;
+    }
+
+    // Finally, compose the URI, with params.  The buffer is sized for the
+    // worst case of each fixed parameter name plus its value
+    char queryParams[sizeof("X-Amz-Algorithm=AWS4-HMAC-SHA256")
+        + sizeof("&X-Amz-Credential=") + MAX_CREDENTIAL_SIZE
+        + sizeof("&X-Amz-Date=") + 16 + sizeof("&X-Amz-Expires=") + 6
+        + sizeof("&X-Amz-SignedHeaders=") + 128 + sizeof("&X-Amz-Signature=")
+        + sizeof(computed.requestSignatureHex) + 1];
+
+    snprintf(queryParams, sizeof(queryParams),
+             "X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=%s"
+             "&X-Amz-Date=%s&X-Amz-Expires=%d"
+             "&X-Amz-SignedHeaders=%s&X-Amz-Signature=%s",
+             computed.authCredential, computed.requestDateISO8601, expires,
+             computed.signedHeaders, computed.requestSignatureHex);
+
+    return compose_uri(buffer, S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE,
+                       bucketContext, computed.urlEncodedKey, resource,
+                       queryParams);
+}

+ 201 - 0
libs/libs3/src/request_context.c

@@ -0,0 +1,201 @@
+/** **************************************************************************
+ * request_context.c
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <curl/curl.h>
+#include <stdlib.h>
+#include <sys/select.h>
+#include "request.h"
+#include "request_context.h"
+
+
+// Allocates and initializes a new S3RequestContext backed by a fresh curl
+// multi handle.  On success the context is stored in *requestContextReturn
+// and S3StatusOK is returned; S3StatusOutOfMemory is returned if either
+// the context or its curl multi handle cannot be created.
+S3Status S3_create_request_context(S3RequestContext **requestContextReturn)
+{
+    *requestContextReturn =
+        (S3RequestContext *) malloc(sizeof(S3RequestContext));
+
+    if (!*requestContextReturn) {
+        return S3StatusOutOfMemory;
+    }
+
+    S3RequestContext *context = *requestContextReturn;
+
+    context->curlm = curl_multi_init();
+    if (!context->curlm) {
+        // curl_multi_init failure is reported as an out-of-memory condition
+        free(context);
+        return S3StatusOutOfMemory;
+    }
+
+    context->requests = 0;
+    context->verifyPeer = 0;
+    context->verifyPeerSet = 0;
+
+    return S3StatusOK;
+}
+
+
+// Destroys a request context: every request still attached is detached
+// from the curl multi handle and failed with S3StatusInterrupted (which
+// runs its completion callback via request_finish), after which the multi
+// handle and the context itself are freed.
+void S3_destroy_request_context(S3RequestContext *requestContext)
+{
+    // For each request in the context, remove curl handle, call back its done
+    // method with 'interrupted' status
+    Request *r = requestContext->requests, *rFirst = r;
+    
+    if (r) do {
+        r->status = S3StatusInterrupted;
+        // remove easy handle from a multi session
+        curl_multi_remove_handle(requestContext->curlm, r->curl);
+        // Capture the next pointer first: request_finish releases r
+        Request *rNext = r->next;
+        request_finish(r);
+        r = rNext;
+    } while (r != rFirst);  // the list is circular; stop back at the start
+
+    curl_multi_cleanup(requestContext->curlm);
+
+    free(requestContext);
+}
+
+
+// Drives every request in the context to completion.  In a loop: builds
+// the fd sets curl wants to wait on, select()s on them (bounded by curl's
+// suggested timeout), then lets curl make progress via
+// S3_runonce_request_context, until no requests remain.  Returns the first
+// non-OK status encountered, or S3StatusOK once all requests are done.
+S3Status S3_runall_request_context(S3RequestContext *requestContext)
+{
+    int requestsRemaining;
+    do {
+        fd_set readfds, writefds, exceptfds;
+        FD_ZERO(&readfds);
+        FD_ZERO(&writefds);
+        FD_ZERO(&exceptfds);
+        int maxfd;
+        S3Status status = S3_get_request_context_fdsets
+            (requestContext, &readfds, &writefds, &exceptfds, &maxfd);
+        if (status != S3StatusOK) {
+            return status;
+        }
+        // curl will return -1 if it hasn't even created any fds yet because
+        // none of the connections have started yet.  In this case, don't
+        // do the select at all, because it will wait forever; instead, just
+        // skip it and go straight to running the underlying CURL handles
+        if (maxfd != -1) {
+            int64_t timeout = S3_get_request_context_timeout(requestContext);
+            // Split the millisecond timeout into seconds + microseconds.
+            // A timeout of -1 means "no timeout set": pass a NULL timeval
+            // so select() blocks until a descriptor becomes ready
+            struct timeval tv = { timeout / 1000, (timeout % 1000) * 1000 };
+            select(maxfd + 1, &readfds, &writefds, &exceptfds,
+                   (timeout == -1) ? 0 : &tv);
+        }
+        status = S3_runonce_request_context(requestContext,
+                                            &requestsRemaining);
+        if (status != S3StatusOK) {
+            return status;
+        }
+    } while (requestsRemaining);
+    
+    return S3StatusOK;
+}
+
+
+// Makes one non-blocking pass of progress over all transfers in the
+// context.  Completed transfers are unlinked from the context's request
+// list, have their CURLcode result folded into the request status, are
+// removed from the curl multi handle, and are finished (which invokes
+// their completion callbacks).  On return, *requestsRemainingReturn holds
+// the number of transfers still in flight.
+S3Status S3_runonce_request_context(S3RequestContext *requestContext, 
+                                    int *requestsRemainingReturn)
+{
+    CURLMcode status;
+
+    do {
+        status = curl_multi_perform(requestContext->curlm,
+                                    requestsRemainingReturn);
+
+        switch (status) {
+        case CURLM_OK:
+        case CURLM_CALL_MULTI_PERFORM:
+            break;
+        case CURLM_OUT_OF_MEMORY:
+            return S3StatusOutOfMemory;
+        default:
+            return S3StatusInternalError;
+        }
+
+        // Drain curl's completion messages for any finished transfers
+        CURLMsg *msg;
+        int junk;
+        while ((msg = curl_multi_info_read(requestContext->curlm, &junk))) {
+            if (msg->msg != CURLMSG_DONE) {
+                return S3StatusInternalError;
+            }
+            // Recover the Request stored as the easy handle's private data
+            // (the double cast presumably silences pointer-aliasing
+            // warnings -- confirm)
+            Request *request;
+            if (curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE, 
+                                  (char **) (char *) &request) != CURLE_OK) {
+                return S3StatusInternalError;
+            }
+            // Remove the request from the list of requests
+            if (request->prev == request->next) {
+                // It was the only one on the list
+                requestContext->requests = 0;
+            }
+            else {
+                // It doesn't matter what the order of them are, so just in
+                // case request was at the head of the list, put the one after
+                // request to the head of the list
+                requestContext->requests = request->next;
+                request->prev->next = request->next;
+                request->next->prev = request->prev;
+            }
+            // Only translate the curl result if no error has already been
+            // recorded on the request
+            if ((msg->data.result != CURLE_OK) &&
+                (request->status == S3StatusOK)) {
+                request->status = request_curl_code_to_status
+                    (msg->data.result);
+            }
+            if (curl_multi_remove_handle(requestContext->curlm, 
+                                         msg->easy_handle) != CURLM_OK) {
+                return S3StatusInternalError;
+            }
+            // Finish the request, ensuring that all callbacks have been made,
+            // and also releases the request
+            request_finish(request);
+            // Now, since a callback was made, there may be new requests 
+            // queued up to be performed immediately, so do so
+            status = CURLM_CALL_MULTI_PERFORM;
+        }
+    } while (status == CURLM_CALL_MULTI_PERFORM);
+
+    return S3StatusOK;
+}
+
+// Populates the given fd_sets with the descriptors curl is waiting on for
+// this request context, for use with select().  curl sets *maxFd to the
+// highest descriptor added, or -1 when it has no descriptors yet.
+S3Status S3_get_request_context_fdsets(S3RequestContext *requestContext,
+                                       fd_set *readFdSet, fd_set *writeFdSet,
+                                       fd_set *exceptFdSet, int *maxFd)
+{
+    CURLMcode code = curl_multi_fdset(requestContext->curlm, readFdSet,
+                                      writeFdSet, exceptFdSet, maxFd);
+
+    if (code != CURLM_OK) {
+        return S3StatusInternalError;
+    }
+
+    return S3StatusOK;
+}
+
+// Returns the number of milliseconds the caller may wait before curl needs
+// to be driven again, as reported by curl_multi_timeout().  A result of -1
+// means "no timeout set".  If curl reports an error, 0 is returned so the
+// caller does not block.
+int64_t S3_get_request_context_timeout(S3RequestContext *requestContext)
+{
+    long timeoutMs;
+
+    if (curl_multi_timeout(requestContext->curlm, &timeoutMs) == CURLM_OK) {
+        return timeoutMs;
+    }
+
+    return 0;
+}
+
+// Records the caller's SSL peer-verification preference on the request
+// context: normalizes verifyPeer to 0/1 and marks the preference as
+// explicitly set so it takes effect for requests run in this context.
+void S3_set_request_context_verify_peer(S3RequestContext *requestContext,
+                                        int verifyPeer)
+{
+    requestContext->verifyPeer = verifyPeer ? 1 : 0;
+    requestContext->verifyPeerSet = 1;
+}

+ 215 - 0
libs/libs3/src/response_headers_handler.c

@@ -0,0 +1,215 @@
+/** **************************************************************************
+ * response_headers_handler.c
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <ctype.h>
+#include <string.h>
+#include <strings.h>
+#include "response_headers_handler.h"
+
+
+// Resets a ResponseHeadersHandler to its pristine pre-request state:
+// clears every parsed response property, marks last-modified as unknown
+// (-1), flags the handler as not yet done, and empties both string
+// multibuffers used to hold copied header values.
+void response_headers_handler_initialize(ResponseHeadersHandler *handler)
+{
+    S3ResponseProperties *properties = &(handler->responseProperties);
+
+    properties->requestId = 0;
+    properties->requestId2 = 0;
+    properties->contentType = 0;
+    properties->contentLength = 0;
+    properties->server = 0;
+    properties->eTag = 0;
+    properties->lastModified = -1;
+    properties->metaDataCount = 0;
+    properties->metaData = 0;
+    properties->usesServerSideEncryption = 0;
+
+    handler->done = 0;
+
+    string_multibuffer_initialize(handler->responsePropertyStrings);
+    string_multibuffer_initialize(handler->responseMetaDataStrings);
+}
+
+
+// Parses one raw HTTP response header line, as delivered by curl's header
+// callback, and records any recognized S3 response property in the
+// handler.  The line is trimmed and NUL-terminated in place (so 'header'
+// is modified), split at the first ':', and its value is copied into the
+// handler's string multibuffers.  Unrecognized headers, malformed lines,
+// and values that no longer fit in the buffers are silently ignored.
+//
+// NOTE(review): the name comparisons below use strncasecmp with the
+// header's own name length, so a header name that is a strict prefix of a
+// known name would also match -- confirm this is acceptable.
+void response_headers_handler_add(ResponseHeadersHandler *handler,
+                                  char *header, int len)
+{
+    S3ResponseProperties *responseProperties = &(handler->responseProperties);
+    char *end = &(header[len]);
+    
+    // Curl might call back the header function after the body has been
+    // received, for 'chunked encoded' contents.  We don't handle this as of
+    // yet, and it's not clear that it would ever be useful.
+    if (handler->done) {
+        return;
+    }
+
+    // If we've already filled up the response headers, ignore this data.
+    // This sucks, but it shouldn't happen - S3 should not be sending back
+    // really long headers.
+    if (handler->responsePropertyStringsSize == 
+        (sizeof(handler->responsePropertyStrings) - 1)) {
+        return;
+    }
+
+    // It should not be possible to have a header line less than 3 long
+    if (len < 3) {
+        return;
+    }
+
+    // Skip whitespace at beginning of header; there never should be any,
+    // but just to be safe
+    while (is_blank(*header)) {
+        header++;
+    }
+
+    // The header must end in \r\n, so skip back over it, and also over any
+    // trailing whitespace
+    end -= 3;
+    while ((end > header) && is_blank(*end)) {
+        end--;
+    }
+    if (!is_blank(*end)) {
+        end++;
+    }
+
+    if (end == header) {
+        // totally bogus
+        return;
+    }
+
+    // NUL-terminate the trimmed header in place
+    *end = 0;
+    
+    // Find the colon to split the header up
+    char *c = header;
+    while (*c && (*c != ':')) {
+        c++;
+    }
+    
+    int namelen = c - header;
+
+    // Now walk c past the colon
+    c++;
+    // Now skip whitespace to the beginning of the value
+    while (is_blank(*c)) {
+        c++;
+    }
+
+    // valuelen includes the NUL terminator written above
+    int valuelen = (end - c) + 1, fit;
+
+    if (!strncasecmp(header, "x-amz-request-id", namelen)) {
+        responseProperties->requestId = 
+            string_multibuffer_current(handler->responsePropertyStrings);
+        string_multibuffer_add(handler->responsePropertyStrings, c, 
+                               valuelen, fit);
+    }
+    else if (!strncasecmp(header, "x-amz-id-2", namelen)) {
+        responseProperties->requestId2 = 
+            string_multibuffer_current(handler->responsePropertyStrings);
+        string_multibuffer_add(handler->responsePropertyStrings, c, 
+                               valuelen, fit);
+    }
+    else if (!strncasecmp(header, "Content-Type", namelen)) {
+        responseProperties->contentType = 
+            string_multibuffer_current(handler->responsePropertyStrings);
+        string_multibuffer_add(handler->responsePropertyStrings, c, 
+                               valuelen, fit);
+    }
+    else if (!strncasecmp(header, "Content-Length", namelen)) {
+        // Parse the decimal value; assumes the value is all digits
+        handler->responseProperties.contentLength = 0;
+        while (*c) {
+            handler->responseProperties.contentLength *= 10;
+            handler->responseProperties.contentLength += (*c++ - '0');
+        }
+    }
+    else if (!strncasecmp(header, "Server", namelen)) {
+        responseProperties->server = 
+            string_multibuffer_current(handler->responsePropertyStrings);
+        string_multibuffer_add(handler->responsePropertyStrings, c, 
+                               valuelen, fit);
+    }
+    else if (!strncasecmp(header, "ETag", namelen)) {
+        responseProperties->eTag = 
+            string_multibuffer_current(handler->responsePropertyStrings);
+        string_multibuffer_add(handler->responsePropertyStrings, c, 
+                               valuelen, fit);
+    }
+    else if (!strncasecmp(header, S3_METADATA_HEADER_NAME_PREFIX, 
+                      sizeof(S3_METADATA_HEADER_NAME_PREFIX) - 1)) {
+        // Make sure there is room for another x-amz-meta header.  BUGFIX:
+        // compare against the array's element count, not its size in
+        // bytes; the old byte-size comparison let responseMetaData
+        // overflow before the guard ever triggered.
+        if (handler->responseProperties.metaDataCount ==
+            (int) (sizeof(handler->responseMetaData) /
+                   sizeof(handler->responseMetaData[0]))) {
+            return;
+        }
+        // Copy the name in (without the x-amz-meta- prefix)
+        char *metaName = &(header[sizeof(S3_METADATA_HEADER_NAME_PREFIX) - 1]);
+        int metaNameLen = 
+            (namelen - (sizeof(S3_METADATA_HEADER_NAME_PREFIX) - 1));
+        char *copiedName = 
+            string_multibuffer_current(handler->responseMetaDataStrings);
+        string_multibuffer_add(handler->responseMetaDataStrings, metaName,
+                               metaNameLen, fit);
+        if (!fit) {
+            return;
+        }
+
+        // Copy the value in
+        char *copiedValue = 
+            string_multibuffer_current(handler->responseMetaDataStrings);
+        string_multibuffer_add(handler->responseMetaDataStrings,
+                               c, valuelen, fit);
+        if (!fit) {
+            return;
+        }
+
+        // Lazily point the public metaData field at the backing array on
+        // the first metadata header seen
+        if (!handler->responseProperties.metaDataCount) {
+            handler->responseProperties.metaData = 
+                handler->responseMetaData;
+        }
+
+        S3NameValue *metaHeader = 
+            &(handler->responseMetaData
+              [handler->responseProperties.metaDataCount++]);
+        metaHeader->name = copiedName;
+        metaHeader->value = copiedValue;
+    }
+    else if (!strncasecmp(header, "x-amz-server-side-encryption", namelen)) {
+        if (!strncmp(c, "AES256", sizeof("AES256") - 1)) {
+            responseProperties->usesServerSideEncryption = 1;
+        }
+        // Ignore other values - only AES256 is expected, anything else is
+        // assumed to be "None" or some other value indicating no server-side
+        // encryption
+    }
+}
+
+
+// Called once all response headers have arrived.  Fetches the parsed
+// Last-Modified timestamp from curl (which is easiest to let curl parse)
+// and marks the handler as done so later header callbacks are ignored.
+void response_headers_handler_done(ResponseHeadersHandler *handler, CURL *curl)
+{
+    time_t lastModified;
+    CURLcode code = curl_easy_getinfo(curl, CURLINFO_FILETIME,
+                                      &lastModified);
+
+    if (code == CURLE_OK) {
+        handler->responseProperties.lastModified = lastModified;
+    }
+
+    handler->done = 1;
+}

+ 4040 - 0
libs/libs3/src/s3.c

@@ -0,0 +1,4040 @@
+/** **************************************************************************
+ * s3.c
+ *
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ *
+ * This file is part of libs3.
+ *
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+/**
+ * This is a 'driver' program that simply converts command-line input into
+ * calls to libs3 functions, and prints the results.
+ **/
+
+#define _XOPEN_SOURCE 600
+#include <ctype.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include "libs3.h"
+
+// Some Windows stuff
+#ifndef FOPEN_EXTRA_FLAGS
+#define FOPEN_EXTRA_FLAGS ""
+#endif
+
+// Some Unix stuff (to work around Windows issues)
+#ifndef SLEEP_UNITS_PER_SECOND
+#define SLEEP_UNITS_PER_SECOND 1
+#endif
+
+// Also needed for Windows, because somehow MinGW doesn't define this
+extern int putenv(char *);
+
+
+// Command-line options, saved as globals ------------------------------------
+
+static int forceG = 0;
+static int showResponsePropertiesG = 0;
+static S3Protocol protocolG = S3ProtocolHTTPS;
+static S3UriStyle uriStyleG = S3UriStylePath;
+static int retriesG = 5;
+static int timeoutMsG = 0;
+static int verifyPeerG = 0;
+static const char *awsRegionG = NULL;
+
+
+// Environment variables, saved as globals ----------------------------------
+
+static const char *accessKeyIdG = 0;
+static const char *secretAccessKeyG = 0;
+
+
+// Request results, saved as globals -----------------------------------------
+
+static int statusG = 0;
+static char errorDetailsG[4096] = { 0 };
+
+
+// Other globals -------------------------------------------------------------
+
+static char putenvBufG[256];
+
+
+// Option prefixes -----------------------------------------------------------
+
+#define LOCATION_PREFIX "location="
+#define LOCATION_PREFIX_LEN (sizeof(LOCATION_PREFIX) - 1)
+#define CANNED_ACL_PREFIX "cannedAcl="
+#define CANNED_ACL_PREFIX_LEN (sizeof(CANNED_ACL_PREFIX) - 1)
+#define PREFIX_PREFIX "prefix="
+#define PREFIX_PREFIX_LEN (sizeof(PREFIX_PREFIX) - 1)
+#define MARKER_PREFIX "marker="
+#define MARKER_PREFIX_LEN (sizeof(MARKER_PREFIX) - 1)
+#define DELIMITER_PREFIX "delimiter="
+#define DELIMITER_PREFIX_LEN (sizeof(DELIMITER_PREFIX) - 1)
+#define ENCODING_TYPE_PREFIX "encoding-type="
+#define ENCODING_TYPE_PREFIX_LEN (sizeof(ENCODING_TYPE_PREFIX) - 1)
+#define MAX_UPLOADS_PREFIX "max-uploads="
+#define MAX_UPLOADS_PREFIX_LEN (sizeof(MAX_UPLOADS_PREFIX) - 1)
+#define KEY_MARKER_PREFIX "key-marker="
+#define KEY_MARKER_PREFIX_LEN (sizeof(KEY_MARKER_PREFIX) - 1)
+#define UPLOAD_ID_PREFIX "upload-id="
+#define UPLOAD_ID_PREFIX_LEN (sizeof(UPLOAD_ID_PREFIX) - 1)
+#define MAX_PARTS_PREFIX "max-parts="
+#define MAX_PARTS_PREFIX_LEN (sizeof(MAX_PARTS_PREFIX) - 1)
+#define PART_NUMBER_MARKER_PREFIX "part-number-marker="
+#define PART_NUMBER_MARKER_PREFIX_LEN (sizeof(PART_NUMBER_MARKER_PREFIX) - 1)
+#define UPLOAD_ID_MARKER_PREFIX "upload-id-marker="
+#define UPLOAD_ID_MARKER_PREFIX_LEN (sizeof(UPLOAD_ID_MARKER_PREFIX) - 1)
+#define MAXKEYS_PREFIX "maxkeys="
+#define MAXKEYS_PREFIX_LEN (sizeof(MAXKEYS_PREFIX) - 1)
+#define FILENAME_PREFIX "filename="
+#define FILENAME_PREFIX_LEN (sizeof(FILENAME_PREFIX) - 1)
+#define CONTENT_LENGTH_PREFIX "contentLength="
+#define CONTENT_LENGTH_PREFIX_LEN (sizeof(CONTENT_LENGTH_PREFIX) - 1)
+#define CACHE_CONTROL_PREFIX "cacheControl="
+#define CACHE_CONTROL_PREFIX_LEN (sizeof(CACHE_CONTROL_PREFIX) - 1)
+#define CONTENT_TYPE_PREFIX "contentType="
+#define CONTENT_TYPE_PREFIX_LEN (sizeof(CONTENT_TYPE_PREFIX) - 1)
+#define MD5_PREFIX "md5="
+#define MD5_PREFIX_LEN (sizeof(MD5_PREFIX) - 1)
+#define CONTENT_DISPOSITION_FILENAME_PREFIX "contentDispositionFilename="
+#define CONTENT_DISPOSITION_FILENAME_PREFIX_LEN \
+    (sizeof(CONTENT_DISPOSITION_FILENAME_PREFIX) - 1)
+#define CONTENT_ENCODING_PREFIX "contentEncoding="
+#define CONTENT_ENCODING_PREFIX_LEN (sizeof(CONTENT_ENCODING_PREFIX) - 1)
+#define EXPIRES_PREFIX "expires="
+#define EXPIRES_PREFIX_LEN (sizeof(EXPIRES_PREFIX) - 1)
+#define X_AMZ_META_PREFIX "x-amz-meta-"
+#define X_AMZ_META_PREFIX_LEN (sizeof(X_AMZ_META_PREFIX) - 1)
+#define USE_SERVER_SIDE_ENCRYPTION_PREFIX "useServerSideEncryption="
+#define USE_SERVER_SIDE_ENCRYPTION_PREFIX_LEN \
+    (sizeof(USE_SERVER_SIDE_ENCRYPTION_PREFIX) - 1)
+#define IF_MODIFIED_SINCE_PREFIX "ifModifiedSince="
+#define IF_MODIFIED_SINCE_PREFIX_LEN (sizeof(IF_MODIFIED_SINCE_PREFIX) - 1)
+#define IF_NOT_MODIFIED_SINCE_PREFIX "ifNotmodifiedSince="
+#define IF_NOT_MODIFIED_SINCE_PREFIX_LEN \
+    (sizeof(IF_NOT_MODIFIED_SINCE_PREFIX) - 1)
+#define IF_MATCH_PREFIX "ifMatch="
+#define IF_MATCH_PREFIX_LEN (sizeof(IF_MATCH_PREFIX) - 1)
+#define IF_NOT_MATCH_PREFIX "ifNotMatch="
+#define IF_NOT_MATCH_PREFIX_LEN (sizeof(IF_NOT_MATCH_PREFIX) - 1)
+#define START_BYTE_PREFIX "startByte="
+#define START_BYTE_PREFIX_LEN (sizeof(START_BYTE_PREFIX) - 1)
+#define BYTE_COUNT_PREFIX "byteCount="
+#define BYTE_COUNT_PREFIX_LEN (sizeof(BYTE_COUNT_PREFIX) - 1)
+#define ALL_DETAILS_PREFIX "allDetails="
+#define ALL_DETAILS_PREFIX_LEN (sizeof(ALL_DETAILS_PREFIX) - 1)
+#define NO_STATUS_PREFIX "noStatus="
+#define NO_STATUS_PREFIX_LEN (sizeof(NO_STATUS_PREFIX) - 1)
+#define RESOURCE_PREFIX "resource="
+#define RESOURCE_PREFIX_LEN (sizeof(RESOURCE_PREFIX) - 1)
+#define TARGET_BUCKET_PREFIX "targetBucket="
+#define TARGET_BUCKET_PREFIX_LEN (sizeof(TARGET_BUCKET_PREFIX) - 1)
+#define TARGET_PREFIX_PREFIX "targetPrefix="
+#define TARGET_PREFIX_PREFIX_LEN (sizeof(TARGET_PREFIX_PREFIX) - 1)
+#define HTTP_METHOD_PREFIX "method="
+#define HTTP_METHOD_PREFIX_LEN (sizeof(HTTP_METHOD_PREFIX) - 1)
+
+
+// util ----------------------------------------------------------------------
+
+// Initializes libs3, honoring the S3_HOSTNAME environment variable as an
+// alternative endpoint and folding the command-line peer-verification flag
+// into the init flags.  Prints an error and exits the process on failure.
+static void S3_init()
+{
+    const char *hostname = getenv("S3_HOSTNAME");
+    S3Status status = S3_initialize("s3", verifyPeerG|S3_INIT_ALL, hostname);
+
+    if (status != S3StatusOK) {
+        fprintf(stderr, "Failed to initialize libs3: %s\n",
+                S3_get_status_name(status));
+        exit(-1);
+    }
+}
+
+
+// Prints the status name of the last failed request to stderr, followed by
+// the detailed S3 error text when the status is an S3-level error (i.e. at
+// or beyond S3StatusErrorAccessDenied).
+static void printError()
+{
+    fprintf(stderr, "\nERROR: %s\n", S3_get_status_name(statusG));
+    if (statusG >= S3StatusErrorAccessDenied) {
+        fprintf(stderr, "%s\n", errorDetailsG);
+    }
+}
+
+
+static void usageExit(FILE *out)
+{
+    fprintf(out,
+"\n Options:\n"
+"\n"
+"   Command Line:\n"
+"\n"
+"   -f/--force           : force operation despite warnings\n"
+"   -h/--vhost-style     : use virtual-host-style URIs (default is "
+                          "path-style)\n"
+"   -u/--unencrypted     : unencrypted (use HTTP instead of HTTPS)\n"
+"   -s/--show-properties : show response properties on stdout\n"
+"   -r/--retries         : retry retryable failures this number of times\n"
+"                          (default is 5)\n"
+"   -t/--timeout         : request timeout, milliseconds. 0 if waiting forever\n"
+"                          (default is 0)\n"
+"   -v/--verify-peer     : verify peer SSL certificate (default is no)\n"
+"   -g/--region <REGION> : use <REGION> for request authorization\n"
+"\n"
+"   Environment:\n"
+"\n"
+"   S3_ACCESS_KEY_ID     : S3 access key ID (required)\n"
+"   S3_SECRET_ACCESS_KEY : S3 secret access key (required)\n"
+"   S3_HOSTNAME          : specify alternative S3 host (optional)\n"
+"\n"
+" Commands (with <required parameters> and [optional parameters]) :\n"
+"\n"
+"   (NOTE: all command parameters take a value and are specified using the\n"
+"          pattern parameter=value)\n"
+"\n"
+"   help                 : Prints this help text\n"
+"\n"
+"   list                 : Lists owned buckets\n"
+"     [allDetails]       : Show full details\n"
+"\n"
+"   test                 : Tests a bucket for existence and accessibility\n"
+"     <bucket>           : Bucket to test\n"
+"\n"
+"   create               : Create a new bucket\n"
+"     <bucket>           : Bucket to create\n"
+"     [cannedAcl]        : Canned ACL for the bucket (see Canned ACLs)\n"
+"     [location]         : Location for bucket (for example, EU)\n"
+"\n"
+"   delete               : Delete a bucket or key\n"
+"     <bucket>[/<key>]   : Bucket or bucket/key to delete\n"
+"\n"
+"   list                 : List bucket contents\n"
+"     <bucket>           : Bucket to list\n"
+"     [prefix]           : Prefix for results set\n"
+"     [marker]           : Where in results set to start listing\n"
+"     [delimiter]        : Delimiter for rolling up results set\n"
+"     [maxkeys]          : Maximum number of keys to return in results set\n"
+"     [allDetails]       : Show full details for each key\n"
+"\n"
+"   getacl               : Get the ACL of a bucket or key\n"
+"     <bucket>[/<key>]   : Bucket or bucket/key to get the ACL of\n"
+"     [filename]         : Output filename for ACL (default is stdout)\n"
+"\n"
+"   setacl               : Set the ACL of a bucket or key\n"
+"     <bucket>[/<key>]   : Bucket or bucket/key to set the ACL of\n"
+"     [filename]         : Input filename for ACL (default is stdin)\n"
+"   getlifecycle         : Get the lifecycle of a bucket\n"
+"     <bucket>           : Bucket or bucket to get the lifecycle of\n"
+"     [filename]         : Output filename for lifecycle (default is stdout)\n"
+"\n"
+"   setlifecycle         : Set the lifecycle of a bucket or key\n"
+"     <bucket>           : Bucket or bucket to set the lifecycle of\n"
+"     [filename]         : Input filename for lifecycle (default is stdin)\n"
+"\n"
+"   getlogging           : Get the logging status of a bucket\n"
+"     <bucket>           : Bucket to get the logging status of\n"
+"     [filename]         : Output filename for logging (default is stdout)\n"
+"\n"
+"   setlogging           : Set the logging status of a bucket\n"
+"     <bucket>           : Bucket to set the logging status of\n"
+"     [targetBucket]     : Target bucket to log to; if not present, disables\n"
+"                          logging\n"
+"     [targetPrefix]     : Key prefix to use for logs\n"
+"     [filename]         : Input filename for logging (default is stdin)\n"
+"\n"
+"   put                  : Puts an object\n"
+"     <bucket>/<key>     : Bucket/key to put object to\n"
+"     [filename]         : Filename to read source data from "
+                          "(default is stdin)\n"
+"     [contentLength]    : How many bytes of source data to put (required if\n"
+"                          source file is stdin)\n"
+"     [cacheControl]     : Cache-Control HTTP header string to associate with\n"
+"                          object\n"
+"     [contentType]      : Content-Type HTTP header string to associate with\n"
+"                          object\n"
+"     [md5]              : MD5 for validating source data\n"
+"     [contentDispositionFilename] : Content-Disposition filename string to\n"
+"                          associate with object\n"
+"     [contentEncoding]  : Content-Encoding HTTP header string to associate\n"
+"                          with object\n"
+"     [expires]          : Expiration date to associate with object\n"
+"     [cannedAcl]        : Canned ACL for the object (see Canned ACLs)\n"
+"     [x-amz-meta-...]]  : Metadata headers to associate with the object\n"
+"     [useServerSideEncryption] : Whether or not to use server-side\n"
+"                          encryption for the object\n"
+"     [upload-id]        : Upload-id of a uncomplete multipart upload, if you \n"
+"                          want to continue to put the object, you must specifil\n"
+"\n"
+"   copy                 : Copies an object; if any options are set, the "
+                          "entire\n"
+"                          metadata of the object is replaced\n"
+"     <sourcebucket>/<sourcekey> : Source bucket/key\n"
+"     <destbucket>/<destkey> : Destination bucket/key\n"
+"     [cacheControl]     : Cache-Control HTTP header string to associate with\n"
+"                          object\n"
+"     [contentType]      : Content-Type HTTP header string to associate with\n"
+"                          object\n"
+"     [contentDispositionFilename] : Content-Disposition filename string to\n"
+"                          associate with object\n"
+"     [contentEncoding]  : Content-Encoding HTTP header string to associate\n"
+"                          with object\n"
+"     [expires]          : Expiration date to associate with object\n"
+"     [cannedAcl]        : Canned ACL for the object (see Canned ACLs)\n"
+"     [x-amz-meta-...]]  : Metadata headers to associate with the object\n"
+"\n"
+"   get                  : Gets an object\n"
+"     <buckey>/<key>     : Bucket/key of object to get\n"
+"     [filename]         : Filename to write object data to (required if -s\n"
+"                          command line parameter was used)\n"
+"     [ifModifiedSince]  : Only return the object if it has been modified "
+                          "since\n"
+"                          this date\n"
+"     [ifNotmodifiedSince] : Only return the object if it has not been "
+                          "modified\n"
+"                          since this date\n"
+"     [ifMatch]          : Only return the object if its ETag header matches\n"
+"                          this string\n"
+"     [ifNotMatch]       : Only return the object if its ETag header does "
+                          "not\n"
+"                          match this string\n"
+"     [startByte]        : First byte of byte range to return\n"
+"     [byteCount]        : Number of bytes of byte range to return\n"
+"\n"
+"   head                 : Gets only the headers of an object, implies -s\n"
+"     <bucket>/<key>     : Bucket/key of object to get headers of\n"
+"\n"
+"   gqs                  : Generates an authenticated query string\n"
+"     <bucket>[/<key>]   : Bucket or bucket/key to generate query string for\n"
+"     [expires]          : Expiration date for query string\n"
+"     [resource]         : Sub-resource of key for query string, without a\n"
+"                          leading '?', for example, \"torrent\"\n"
+"     [method]           : HTTP method for use with the query string\n"
+"                        : (default is \"GET\")"
+"\n"
+"   listmultiparts       : Show multipart uploads\n"
+"     <bucket>           : Bucket multipart uploads belongs to\n"
+"     [key-marker]       : this parameter specifies the multipart upload after which listing should begin.\n"
+"     [upload-id-marker] : Together with key-marker, specifies the multipart upload after which listing should begin\n"
+"     [delimiter]        : Character you use to group keys.\n"
+"     [max-uploads]      : Sets the maximum number of multipart uploads, from 1 to 1,000\n"
+"     [encoding-type]    : Requests Amazon S3 to encode the response and specifies the encoding method to use.\n"
+"\n"
+"   abortmp              : aborts a multipart upload.\n"
+"     <bucket>/<key>     : Bucket/key of upload belongs to.\n"
+"     [upload-id]        : upload-id of this upload\n"
+"\n"
+"   listparts            : lists the parts that have been uploaded for a specific multipart upload.\n"
+"     <bucket>/<key>     : Bucket/key of upload belongs to\n"
+"     [upload-id]        : upload-id of this upload\n"
+"     [max-parts]        : Sets the maximum number of parts to return in the response body.\n"
+"     [encoding-type]    : Requests Amazon S3 to encode the response and specifies the encoding method to use.\n"
+"     [part-number-marker] : Specifies the part after which listing should begin.\n"
+"\n"
+" Canned ACLs:\n"
+"\n"
+"  The following canned ACLs are supported:\n"
+"    private (default), public-read, public-read-write, authenticated-read\n"
+"\n"
+" ACL Format:\n"
+"\n"
+"  For the getacl and setacl commands, the format of the ACL list is:\n"
+"  1) An initial line giving the owner id in this format:\n"
+"       OwnerID <Owner ID> <Owner Display Name>\n"
+"  2) Optional header lines, giving column headers, starting with the\n"
+"     word \"Type\", or with some number of dashes\n"
+"  3) Grant lines, of the form:\n"
+"       <Grant Type> (whitespace) <Grantee> (whitespace) <Permission>\n"
+"     where Grant Type is one of: Email, UserID, or Group, and\n"
+"     Grantee is the identification of the grantee based on this type,\n"
+"     and Permission is one of: READ, WRITE, READ_ACP, or FULL_CONTROL.\n"
+"\n"
+"  Note that the easiest way to modify an ACL is to first get it, saving it\n"
+"  into a file, then modifying the file, and then setting the modified file\n"
+"  back as the new ACL for the bucket/object.\n"
+"\n"
+" Date Format:\n"
+"\n"
+"  The format for dates used in parameters is as ISO 8601 dates, i.e.\n"
+"  YYYY-MM-DDTHH:MM:SS[+/-dd:dd].  Examples:\n"
+"      2008-07-29T20:36:14\n"
+"      2008-07-29T20:36:14-06:00\n"
+"      2008-07-29T20:36:14+11:30\n"
+"\n");
+
+    exit(-1);
+}
+
+
+// Parses a nonnegative decimal integer from str.  On any non-digit character
+// it prints an error naming paramName and exits via usageExit().
+// NOTE(review): no overflow check — a digit string longer than uint64_t can
+// hold silently wraps around.
+static uint64_t convertInt(const char *str, const char *paramName)
+{
+    uint64_t ret = 0;
+
+    while (*str) {
+        if (!isdigit(*str)) {
+            fprintf(stderr, "\nERROR: Nondigit in %s parameter: %c\n",
+                    paramName, *str);
+            usageExit(stderr);
+        }
+        ret *= 10;
+        ret += (*str++ - '0');
+    }
+
+    return ret;
+}
+
+
+// Node of a circular doubly-linked list used as a FIFO byte buffer.
+// The list head (*gb in the helper functions below) is the read end;
+// (*gb)->prev is the tail where growbuffer_append() writes.
+typedef struct growbuffer
+{
+    // Number of unread bytes currently held in data[]
+    int size;
+    // The start byte (read offset into data[])
+    int start;
+    // The data block (fixed 64 KB per node)
+    char data[64 * 1024];
+    struct growbuffer *prev, *next;
+} growbuffer;
+
+
+// returns nonzero on success, zero on out of memory
+// NOTE(review): the actual return value is the size of the last chunk copied,
+// so a successful call with dataLen == 0 returns 0, contradicting the comment
+// above — callers should treat 0 as failure only when dataLen > 0.
+static int growbuffer_append(growbuffer **gb, const char *data, int dataLen)
+{
+    int toCopy = 0 ;
+    while (dataLen) {
+        // Write always goes to the tail node; allocate a new one if the
+        // list is empty or the tail is full.
+        growbuffer *buf = *gb ? (*gb)->prev : 0;
+        if (!buf || (buf->size == sizeof(buf->data))) {
+            buf = (growbuffer *) malloc(sizeof(growbuffer));
+            if (!buf) {
+                return 0;
+            }
+            buf->size = 0;
+            buf->start = 0;
+            if (*gb && (*gb)->prev) {
+                // Insert the new node as the tail of the circular list
+                buf->prev = (*gb)->prev;
+                buf->next = *gb;
+                (*gb)->prev->next = buf;
+                (*gb)->prev = buf;
+            }
+            else {
+                // First node: a one-element circular list
+                buf->prev = buf->next = buf;
+                *gb = buf;
+            }
+        }
+
+        // Copy as much as fits into the tail node this iteration
+        toCopy = (sizeof(buf->data) - buf->size);
+        if (toCopy > dataLen) {
+            toCopy = dataLen;
+        }
+
+        memcpy(&(buf->data[buf->size]), data, toCopy);
+
+        buf->size += toCopy, data += toCopy, dataLen -= toCopy;
+    }
+
+    return toCopy;
+}
+
+
+// Reads up to amt bytes from the head of the buffer list into buffer,
+// storing the number of bytes actually copied in *amtReturn (0 if the
+// list is empty).  A node is freed once fully consumed; reads never span
+// nodes, so a single call returns at most one node's remaining bytes.
+static void growbuffer_read(growbuffer **gb, int amt, int *amtReturn,
+                            char *buffer)
+{
+    *amtReturn = 0;
+
+    growbuffer *buf = *gb;
+
+    if (!buf) {
+        return;
+    }
+
+    *amtReturn = (buf->size > amt) ? amt : buf->size;
+
+    memcpy(buffer, &(buf->data[buf->start]), *amtReturn);
+
+    buf->start += *amtReturn, buf->size -= *amtReturn;
+
+    if (buf->size == 0) {
+        // Node exhausted: unlink and free it; advance the head, or empty
+        // the list if this was the only node.
+        if (buf->next == buf) {
+            *gb = 0;
+        }
+        else {
+            *gb = buf->next;
+            buf->prev->next = buf->next;
+            buf->next->prev = buf->prev;
+        }
+        free(buf);
+    }
+}
+
+
+// Frees every node of the circular buffer list.  Remembers the starting
+// node so the walk terminates when the circle closes.
+static void growbuffer_destroy(growbuffer *gb)
+{
+    growbuffer *start = gb;
+
+    while (gb) {
+        growbuffer *next = gb->next;
+        free(gb);
+        gb = (next == start) ? 0 : next;
+    }
+}
+
+
+// Convenience utility for making the code look nicer.  Tests a string
+// against a format; only the characters specified in the format are
+// checked (i.e. if the string is longer than the format, the string still
+// checks out ok).  Format characters are:
+// d - is a digit
+// anything else - is that character
+// Returns nonzero if the string checks out, zero if it does not.
+// NOTE(review): if str is shorter than format, the comparison stops at
+// str's NUL terminator and returns 0 (the NUL never matches a format char).
+static int checkString(const char *str, const char *format)
+{
+    while (*format) {
+        if (*format == 'd') {
+            if (!isdigit(*str)) {
+                return 0;
+            }
+        }
+        else if (*str != *format) {
+            return 0;
+        }
+        str++, format++;
+    }
+
+    return 1;
+}
+
+
+// Parses an ISO 8601 timestamp (YYYY-MM-DDTHH:MM:SS[.millis][+/-hh:mm])
+// into a Unix time (seconds since the epoch, UTC).  Returns -1 if the
+// string does not match the expected basic format.
+static int64_t parseIso8601Time(const char *str)
+{
+    // Check to make sure that it has a valid format
+    if (!checkString(str, "dddd-dd-ddTdd:dd:dd")) {
+        return -1;
+    }
+
+// Reads the two-digit number at the current str position
+#define nextnum() (((*str - '0') * 10) + (*(str + 1) - '0'))
+
+    // Convert it
+    struct tm stm;
+    memset(&stm, 0, sizeof(stm));
+
+    // tm_year is years since 1900, so "20xx" becomes (20-19)*100 + xx
+    stm.tm_year = (nextnum() - 19) * 100;
+    str += 2;
+    stm.tm_year += nextnum();
+    str += 3;
+
+    stm.tm_mon = nextnum() - 1;
+    str += 3;
+
+    stm.tm_mday = nextnum();
+    str += 3;
+
+    stm.tm_hour = nextnum();
+    str += 3;
+
+    stm.tm_min = nextnum();
+    str += 3;
+
+    stm.tm_sec = nextnum();
+    str += 2;
+
+    stm.tm_isdst = -1;
+
+    // This is hokey but it's the recommended way ...
+    // Temporarily force TZ=UTC so mktime() interprets stm as UTC.
+    // NOTE(review): not thread-safe (mutates the environment), and if TZ
+    // was originally unset it is restored as the empty string "TZ=",
+    // which is not the same as being unset.
+    char *tz = getenv("TZ");
+    snprintf(putenvBufG, sizeof(putenvBufG), "TZ=UTC");
+    putenv(putenvBufG);
+
+    int64_t ret = mktime(&stm);
+
+    snprintf(putenvBufG, sizeof(putenvBufG), "TZ=%s", tz ? tz : "");
+    putenv(putenvBufG);
+
+    // Skip the millis
+
+    if (*str == '.') {
+        str++;
+        while (isdigit(*str)) {
+            str++;
+        }
+    }
+
+    // Apply an explicit UTC offset, if present: local = UTC + offset, so
+    // subtract the signed offset to get back to UTC.
+    if (checkString(str, "-dd:dd") || checkString(str, "+dd:dd")) {
+        int sign = (*str++ == '-') ? -1 : 1;
+        int hours = nextnum();
+        str += 3;
+        int minutes = nextnum();
+        ret += (-sign * (((hours * 60) + minutes) * 60));
+    }
+    // Else it should be Z to be a conformant time string, but we just assume
+    // that it is rather than enforcing that
+
+    return ret;
+}
+
+
+// Simple ACL format:  Lines of this format:
+// Type - ignored
+// Starting with a dash - ignored
+// Email email_address permission
+// UserID user_id (display_name) permission
+// Group Authenticated AWS Users permission
+// Group All Users  permission
+// permission is one of READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL
+//
+// Parses the simple-format ACL text in aclXml into ownerId,
+// ownerDisplayName, and up to S3_MAX_ACL_GRANT_COUNT grants in aclGrants,
+// with the grant count in *aclGrantCountReturn.  Returns 1 on success,
+// 0 on malformed input or too many grants.
+// NOTE(review): COPY_STRING_MAXLEN writes up to maxlen chars plus a NUL,
+// so ownerId/ownerDisplayName must each hold at least
+// S3_MAX_GRANTEE_USER_ID_SIZE+1 / S3_MAX_GRANTEE_DISPLAY_NAME_SIZE+1
+// bytes — confirm against the callers' buffer declarations.
+static int convert_simple_acl(char *aclXml, char *ownerId,
+                              char *ownerDisplayName,
+                              int *aclGrantCountReturn,
+                              S3AclGrant *aclGrants)
+{
+    *aclGrantCountReturn = 0;
+    *ownerId = 0;
+    *ownerDisplayName = 0;
+
+// Advances aclXml past whitespace; if require_more is set and the end of
+// input is reached, the enclosing function returns 0 (parse failure).
+#define SKIP_SPACE(require_more)                \
+    do {                                        \
+        while (isspace(*aclXml)) {              \
+            aclXml++;                           \
+        }                                       \
+        if (require_more && !*aclXml) {         \
+            return 0;                           \
+        }                                       \
+    } while (0)
+
+// Copies the next whitespace-delimited token (at most maxlen chars) into
+// field, always NUL-terminating; extra characters beyond maxlen are
+// left in aclXml for the next token read.
+#define COPY_STRING_MAXLEN(field, maxlen)               \
+    do {                                                \
+        SKIP_SPACE(1);                                  \
+        int len = 0;                                    \
+        while ((len < maxlen) && !isspace(*aclXml)) {   \
+            field[len++] = *aclXml++;                   \
+        }                                               \
+        field[len] = 0;                                 \
+    } while (0)
+
+#define COPY_STRING(field)                              \
+    COPY_STRING_MAXLEN(field, (int) (sizeof(field) - 1))
+
+    while (1) {
+        SKIP_SPACE(0);
+
+        if (!*aclXml) {
+            break;
+        }
+
+        // Skip Type lines and dash lines
+        if (!strncmp(aclXml, "Type", sizeof("Type") - 1) ||
+            (*aclXml == '-')) {
+            while (*aclXml && ((*aclXml != '\n') && (*aclXml != '\r'))) {
+                aclXml++;
+            }
+            continue;
+        }
+
+        // Owner line: "OwnerID <id> <display name>"
+        if (!strncmp(aclXml, "OwnerID", sizeof("OwnerID") - 1)) {
+            aclXml += sizeof("OwnerID") - 1;
+            COPY_STRING_MAXLEN(ownerId, S3_MAX_GRANTEE_USER_ID_SIZE);
+            SKIP_SPACE(1);
+            COPY_STRING_MAXLEN(ownerDisplayName,
+                               S3_MAX_GRANTEE_DISPLAY_NAME_SIZE);
+            continue;
+        }
+
+        if (*aclGrantCountReturn == S3_MAX_ACL_GRANT_COUNT) {
+            return 0;
+        }
+
+        S3AclGrant *grant = &(aclGrants[(*aclGrantCountReturn)++]);
+
+        // Grantee: Email, UserID, or one of the well-known Groups
+        if (!strncmp(aclXml, "Email", sizeof("Email") - 1)) {
+            grant->granteeType = S3GranteeTypeAmazonCustomerByEmail;
+            aclXml += sizeof("Email") - 1;
+            COPY_STRING(grant->grantee.amazonCustomerByEmail.emailAddress);
+        }
+        else if (!strncmp(aclXml, "UserID", sizeof("UserID") - 1)) {
+            grant->granteeType = S3GranteeTypeCanonicalUser;
+            aclXml += sizeof("UserID") - 1;
+            COPY_STRING(grant->grantee.canonicalUser.id);
+            SKIP_SPACE(1);
+            // Now do display name
+            COPY_STRING(grant->grantee.canonicalUser.displayName);
+        }
+        else if (!strncmp(aclXml, "Group", sizeof("Group") - 1)) {
+            aclXml += sizeof("Group") - 1;
+            SKIP_SPACE(1);
+            if (!strncmp(aclXml, "Authenticated AWS Users",
+                         sizeof("Authenticated AWS Users") - 1)) {
+                grant->granteeType = S3GranteeTypeAllAwsUsers;
+                aclXml += (sizeof("Authenticated AWS Users") - 1);
+            }
+            else if (!strncmp(aclXml, "All Users", sizeof("All Users") - 1)) {
+                grant->granteeType = S3GranteeTypeAllUsers;
+                aclXml += (sizeof("All Users") - 1);
+            }
+            else if (!strncmp(aclXml, "Log Delivery",
+                              sizeof("Log Delivery") - 1)) {
+                grant->granteeType = S3GranteeTypeLogDelivery;
+                aclXml += (sizeof("Log Delivery") - 1);
+            }
+            else {
+                return 0;
+            }
+        }
+        else {
+            return 0;
+        }
+
+        SKIP_SPACE(1);
+
+        // Permission.  READ_ACP/WRITE_ACP are matched before READ/WRITE
+        // because the latter are prefixes of the former.
+        // NOTE(review): an unrecognized permission token is silently
+        // skipped, leaving grant->permission uninitialized for that grant.
+        if (!strncmp(aclXml, "READ_ACP", sizeof("READ_ACP") - 1)) {
+            grant->permission = S3PermissionReadACP;
+            aclXml += (sizeof("READ_ACP") - 1);
+        }
+        else if (!strncmp(aclXml, "READ", sizeof("READ") - 1)) {
+            grant->permission = S3PermissionRead;
+            aclXml += (sizeof("READ") - 1);
+        }
+        else if (!strncmp(aclXml, "WRITE_ACP", sizeof("WRITE_ACP") - 1)) {
+            grant->permission = S3PermissionWriteACP;
+            aclXml += (sizeof("WRITE_ACP") - 1);
+        }
+        else if (!strncmp(aclXml, "WRITE", sizeof("WRITE") - 1)) {
+            grant->permission = S3PermissionWrite;
+            aclXml += (sizeof("WRITE") - 1);
+        }
+        else if (!strncmp(aclXml, "FULL_CONTROL",
+                          sizeof("FULL_CONTROL") - 1)) {
+            grant->permission = S3PermissionFullControl;
+            aclXml += (sizeof("FULL_CONTROL") - 1);
+        }
+    }
+
+    return 1;
+}
+
+// Decrements the global retry budget (retriesG); if retries remain,
+// sleeps for a linearly increasing interval and returns 1 (caller should
+// retry), otherwise returns 0.
+// NOTE(review): retrySleepInterval is static, so the backoff carries over
+// between independent retry loops within one process run.
+static int should_retry()
+{
+    if (retriesG--) {
+        // Sleep before next retry; start out with a 1 second sleep
+        static int retrySleepInterval = 1 * SLEEP_UNITS_PER_SECOND;
+        sleep(retrySleepInterval);
+        // Next sleep 1 second longer
+        retrySleepInterval++;
+        return 1;
+    }
+
+    return 0;
+}
+
+
+// Long command-line options and their single-character equivalents,
+// consumed by getopt_long().
+static struct option longOptionsG[] =
+{
+    { "force",                no_argument,        0,  'f' },
+    { "vhost-style",          no_argument,        0,  'h' },
+    { "unencrypted",          no_argument,        0,  'u' },
+    { "show-properties",      no_argument,        0,  's' },
+    { "retries",              required_argument,  0,  'r' },
+    { "timeout",              required_argument,  0,  't' },
+    { "verify-peer",          no_argument,        0,  'v' },
+    { "region",               required_argument,  0,  'g' },
+    { 0,                      0,                  0,   0  }
+};
+
+
+// response properties callback ----------------------------------------------
+
+// This callback does the same thing for every request type: prints out the
+// properties if the user has requested them to be so
+static S3Status responsePropertiesCallback
+    (const S3ResponseProperties *properties, void *callbackData)
+{
+    (void) callbackData;
+
+    // -s / --show-properties toggles this globally
+    if (!showResponsePropertiesG) {
+        return S3StatusOK;
+    }
+
+// Prints "name: value" only when the property field is non-NULL
+#define print_nonnull(name, field)                                 \
+    do {                                                           \
+        if (properties-> field) {                                  \
+            printf("%s: %s\n", name, properties-> field);          \
+        }                                                          \
+    } while (0)
+
+    print_nonnull("Content-Type", contentType);
+    print_nonnull("Request-Id", requestId);
+    print_nonnull("Request-Id-2", requestId2);
+    if (properties->contentLength > 0) {
+        printf("Content-Length: %llu\n",
+               (unsigned long long) properties->contentLength);
+    }
+    print_nonnull("Server", server);
+    print_nonnull("ETag", eTag);
+    if (properties->lastModified > 0) {
+        char timebuf[256];
+        time_t t = (time_t) properties->lastModified;
+        // gmtime is not thread-safe but we don't care here.
+        strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", gmtime(&t));
+        printf("Last-Modified: %s\n", timebuf);
+    }
+    int i;
+    for (i = 0; i < properties->metaDataCount; i++) {
+        printf("x-amz-meta-%s: %s\n", properties->metaData[i].name,
+               properties->metaData[i].value);
+    }
+    if (properties->usesServerSideEncryption) {
+        printf("UsesServerSideEncryption: true\n");
+    }
+
+    return S3StatusOK;
+}
+
+
+// response complete callback ------------------------------------------------
+
+// This callback does the same thing for every request type: saves the status
+// and error stuff in global variables
+static void responseCompleteCallback(S3Status status,
+                                     const S3ErrorDetails *error,
+                                     void *callbackData)
+{
+    (void) callbackData;
+
+    statusG = status;
+    // Compose the error details message now, although we might not use it.
+    // Can't just save a pointer to [error] since it's not guaranteed to last
+    // beyond this callback
+    // NOTE(review): snprintf returns the length that *would* have been
+    // written, so if a message truncates, len can exceed
+    // sizeof(errorDetailsG) and the next (sizeof - len) size argument wraps
+    // to a huge size_t — this relies on error messages fitting the buffer.
+    int len = 0;
+    if (error && error->message) {
+        len += snprintf(&(errorDetailsG[len]), sizeof(errorDetailsG) - len,
+                        "  Message: %s\n", error->message);
+    }
+    if (error && error->resource) {
+        len += snprintf(&(errorDetailsG[len]), sizeof(errorDetailsG) - len,
+                        "  Resource: %s\n", error->resource);
+    }
+    if (error && error->furtherDetails) {
+        len += snprintf(&(errorDetailsG[len]), sizeof(errorDetailsG) - len,
+                        "  Further Details: %s\n", error->furtherDetails);
+    }
+    if (error && error->extraDetailsCount) {
+        len += snprintf(&(errorDetailsG[len]), sizeof(errorDetailsG) - len,
+                        "%s", "  Extra Details:\n");
+        int i;
+        for (i = 0; i < error->extraDetailsCount; i++) {
+            len += snprintf(&(errorDetailsG[len]),
+                            sizeof(errorDetailsG) - len, "    %s: %s\n",
+                            error->extraDetails[i].name,
+                            error->extraDetails[i].value);
+        }
+    }
+}
+
+
+// list service --------------------------------------------------------------
+
+// Per-request state for the "list" (list service) command callbacks.
+typedef struct list_service_data
+{
+    int headerPrinted;   // nonzero once the column header has been printed
+    int allDetails;      // nonzero to also print owner ID / display name
+} list_service_data;
+
+
+// Prints the column header for the bucket listing; with allDetails set,
+// also prints the Owner ID and Display Name columns.
+static void printListServiceHeader(int allDetails)
+{
+    printf("%-56s  %-20s", "                         Bucket",
+           "      Created");
+    if (allDetails) {
+        printf("  %-64s  %-12s",
+               "                            Owner ID",
+               "Display Name");
+    }
+    printf("\n");
+    printf("--------------------------------------------------------  "
+           "--------------------");
+    if (allDetails) {
+        printf("  -------------------------------------------------"
+               "---------------  ------------");
+    }
+    printf("\n");
+}
+
+
+// Per-bucket callback for S3_list_service(): prints one formatted row
+// (bucket name, creation date, optionally owner info), printing the
+// header lazily on the first row.
+static S3Status listServiceCallback(const char *ownerId,
+                                    const char *ownerDisplayName,
+                                    const char *bucketName,
+                                    int64_t creationDate, void *callbackData)
+{
+    list_service_data *data = (list_service_data *) callbackData;
+
+    if (!data->headerPrinted) {
+        data->headerPrinted = 1;
+        printListServiceHeader(data->allDetails);
+    }
+
+    // A negative creationDate means "unknown"; print an empty cell
+    char timebuf[256];
+    if (creationDate >= 0) {
+        time_t t = (time_t) creationDate;
+        strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", gmtime(&t));
+    }
+    else {
+        timebuf[0] = 0;
+    }
+
+    printf("%-56s  %-20s", bucketName, timebuf);
+    if (data->allDetails) {
+        printf("  %-64s  %-12s", ownerId ? ownerId : "",
+               ownerDisplayName ? ownerDisplayName : "");
+    }
+    printf("\n");
+
+    return S3StatusOK;
+}
+
+
+// Implements the "list" command: lists all buckets owned by the account,
+// retrying retryable failures, and prints an empty-table header when the
+// account has no buckets.
+static void list_service(int allDetails)
+{
+    list_service_data data;
+
+    data.headerPrinted = 0;
+    data.allDetails = allDetails;
+
+    S3_init();
+
+    S3ListServiceHandler listServiceHandler =
+    {
+        { &responsePropertiesCallback, &responseCompleteCallback },
+        &listServiceCallback
+    };
+
+    do {
+        S3_list_service(protocolG, accessKeyIdG, secretAccessKeyG, 0, 0,
+                        awsRegionG, 0, timeoutMsG, &listServiceHandler, &data);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    if (statusG == S3StatusOK) {
+        // No callback fired => no buckets; still show the (empty) table
+        if (!data.headerPrinted) {
+            printListServiceHeader(allDetails);
+        }
+    }
+    else {
+        printError();
+    }
+
+    S3_deinitialize();
+}
+
+
+// test bucket ---------------------------------------------------------------
+
+// Implements the "test" command: checks whether <bucket> exists and is
+// accessible, and prints its location constraint ("USA" when none is
+// reported) or an existence/access status.
+static void test_bucket(int argc, char **argv, int optindex)
+{
+    // test bucket
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
+        usageExit(stderr);
+    }
+
+    const char *bucketName = argv[optindex++];
+
+    if (optindex != argc) {
+        fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optindex]);
+        usageExit(stderr);
+    }
+
+    S3_init();
+
+    S3ResponseHandler responseHandler =
+    {
+        &responsePropertiesCallback, &responseCompleteCallback
+    };
+
+    char locationConstraint[64];
+    do {
+        S3_test_bucket(protocolG, uriStyleG, accessKeyIdG, secretAccessKeyG, 0,
+                       0, bucketName, awsRegionG, sizeof(locationConstraint),
+                       locationConstraint, 0, timeoutMsG, &responseHandler, 0);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    const char *result;
+
+    switch (statusG) {
+    case S3StatusOK:
+        // bucket exists
+        result = locationConstraint[0] ? locationConstraint : "USA";
+        break;
+    case S3StatusErrorNoSuchBucket:
+        result = "Does Not Exist";
+        break;
+    case S3StatusErrorAccessDenied:
+        result = "Access Denied";
+        break;
+    default:
+        result = 0;
+        break;
+    }
+
+    if (result) {
+        printf("%-56s  %-20s\n", "                         Bucket",
+               "       Status");
+        printf("--------------------------------------------------------  "
+               "--------------------\n");
+        printf("%-56s  %-20s\n", bucketName, result);
+    }
+    else {
+        printError();
+    }
+
+    S3_deinitialize();
+}
+
+
+// create bucket -------------------------------------------------------------
+
+// Implements the "create" command: creates <bucket>, accepting optional
+// locationConstraint= and cannedAcl= parameters.  Unless -f/--force is
+// given, refuses names that are invalid for virtual-host-style URIs.
+static void create_bucket(int argc, char **argv, int optindex)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
+        usageExit(stderr);
+    }
+
+    const char *bucketName = argv[optindex++];
+
+    if (!forceG && (S3_validate_bucket_name
+                    (bucketName, S3UriStyleVirtualHost) != S3StatusOK)) {
+        fprintf(stderr, "\nWARNING: Bucket name is not valid for "
+                "virtual-host style URI access.\n");
+        fprintf(stderr, "Bucket not created.  Use -f option to force the "
+                "bucket to be created despite\n");
+        fprintf(stderr, "this warning.\n\n");
+        exit(-1);
+    }
+
+    const char *locationConstraint = 0;
+    S3CannedAcl cannedAcl = S3CannedAclPrivate;
+    while (optindex < argc) {
+        char *param = argv[optindex++];
+        if (!strncmp(param, LOCATION_PREFIX, LOCATION_PREFIX_LEN)) {
+            locationConstraint = &(param[LOCATION_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, CANNED_ACL_PREFIX, CANNED_ACL_PREFIX_LEN)) {
+            char *val = &(param[CANNED_ACL_PREFIX_LEN]);
+            if (!strcmp(val, "private")) {
+                cannedAcl = S3CannedAclPrivate;
+            }
+            else if (!strcmp(val, "public-read")) {
+                cannedAcl = S3CannedAclPublicRead;
+            }
+            else if (!strcmp(val, "public-read-write")) {
+                cannedAcl = S3CannedAclPublicReadWrite;
+            }
+            else if (!strcmp(val, "authenticated-read")) {
+                cannedAcl = S3CannedAclAuthenticatedRead;
+            }
+            else {
+                fprintf(stderr, "\nERROR: Unknown canned ACL: %s\n", val);
+                usageExit(stderr);
+            }
+        }
+        else {
+            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+            usageExit(stderr);
+        }
+    }
+
+    S3_init();
+
+    S3ResponseHandler responseHandler =
+    {
+        &responsePropertiesCallback, &responseCompleteCallback
+    };
+
+    do {
+        // NOTE(review): passes 0 for the timeout arguments instead of
+        // timeoutMsG, unlike the other commands (e.g. delete_bucket,
+        // list_service) — confirm whether -t/--timeout should apply here.
+        S3_create_bucket(protocolG, accessKeyIdG, secretAccessKeyG, 0, 0,
+                         bucketName, awsRegionG, cannedAcl, locationConstraint,
+                         0, 0, &responseHandler, 0);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    if (statusG == S3StatusOK) {
+        printf("Bucket successfully created.\n");
+    }
+    else {
+        printError();
+    }
+
+    S3_deinitialize();
+}
+
+
+// delete bucket -------------------------------------------------------------
+
+// Implements the "delete" command for a bare bucket name: deletes
+// <bucket>, retrying retryable failures.
+static void delete_bucket(int argc, char **argv, int optindex)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
+        usageExit(stderr);
+    }
+
+    const char *bucketName = argv[optindex++];
+
+    if (optindex != argc) {
+        fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optindex]);
+        usageExit(stderr);
+    }
+
+    S3_init();
+
+    S3ResponseHandler responseHandler =
+    {
+        &responsePropertiesCallback, &responseCompleteCallback
+    };
+
+    do {
+        S3_delete_bucket(protocolG, uriStyleG, accessKeyIdG, secretAccessKeyG,
+                         0, 0, bucketName, awsRegionG, 0, timeoutMsG, &responseHandler, 0);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    if (statusG != S3StatusOK) {
+        printError();
+    }
+
+    S3_deinitialize();
+}
+
+
+// list bucket ---------------------------------------------------------------
+
+// Per-request state for the bucket-contents listing callbacks; nextMarker
+// carries the pagination marker between successive S3_list_bucket calls.
+typedef struct list_bucket_callback_data
+{
+    int isTruncated;         // nonzero if more results remain (paged)
+    char nextMarker[1024];   // key to resume the next page from
+    int keyCount;            // total keys printed so far
+    int allDetails;          // nonzero to also print ETag / owner columns
+} list_bucket_callback_data;
+
+
+// Prints the column header for the object listing; with allDetails set,
+// also prints the ETag, Owner ID and Display Name columns.
+static void printListBucketHeader(int allDetails)
+{
+    printf("%-50s  %-20s  %-5s",
+           "                       Key",
+           "   Last Modified", "Size");
+    if (allDetails) {
+        printf("  %-34s  %-64s  %-12s",
+               "               ETag",
+               "                            Owner ID",
+               "Display Name");
+    }
+    printf("\n");
+    printf("--------------------------------------------------  "
+           "--------------------  -----");
+    if (allDetails) {
+        printf("  ----------------------------------  "
+               "-------------------------------------------------"
+               "---------------  ------------");
+    }
+    printf("\n");
+}
+
+
+// Per-page callback for S3_list_bucket(): records truncation state and the
+// marker for the next page, then prints one formatted row per object and
+// any common prefixes.
+static S3Status listBucketCallback(int isTruncated, const char *nextMarker,
+                                   int contentsCount,
+                                   const S3ListBucketContent *contents,
+                                   int commonPrefixesCount,
+                                   const char **commonPrefixes,
+                                   void *callbackData)
+{
+    list_bucket_callback_data *data =
+        (list_bucket_callback_data *) callbackData;
+
+    data->isTruncated = isTruncated;
+    // This is tricky.  S3 doesn't return the NextMarker if there is no
+    // delimiter.  Why, I don't know, since it's still useful for paging
+    // through results.  We want NextMarker to be the last content in the
+    // list, so set it to that if necessary.
+    if ((!nextMarker || !nextMarker[0]) && contentsCount) {
+        nextMarker = contents[contentsCount - 1].key;
+    }
+    if (nextMarker) {
+        snprintf(data->nextMarker, sizeof(data->nextMarker), "%s",
+                 nextMarker);
+    }
+    else {
+        data->nextMarker[0] = 0;
+    }
+
+    if (contentsCount && !data->keyCount) {
+        printListBucketHeader(data->allDetails);
+    }
+
+    int i;
+    for (i = 0; i < contentsCount; i++) {
+        const S3ListBucketContent *content = &(contents[i]);
+        char timebuf[256];
+        // NOTE(review): dead code — this branch is disabled with `if (0)`,
+        // so the verbose per-key output below is unreachable; presumably a
+        // debug/verbose toggle was removed or intended here.
+        if (0) {
+            time_t t = (time_t) content->lastModified;
+            strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
+                     gmtime(&t));
+            printf("\nKey: %s\n", content->key);
+            printf("Last Modified: %s\n", timebuf);
+            printf("ETag: %s\n", content->eTag);
+            printf("Size: %llu\n", (unsigned long long) content->size);
+            if (content->ownerId) {
+                printf("Owner ID: %s\n", content->ownerId);
+            }
+            if (content->ownerDisplayName) {
+                printf("Owner Display Name: %s\n", content->ownerDisplayName);
+            }
+        }
+        else {
+            time_t t = (time_t) content->lastModified;
+            strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
+                     gmtime(&t));
+            // Render the size human-readably: bytes, K, M, or G
+            char sizebuf[16];
+            if (content->size < 100000) {
+                sprintf(sizebuf, "%5llu", (unsigned long long) content->size);
+            }
+            else if (content->size < (1024 * 1024)) {
+                sprintf(sizebuf, "%4lluK",
+                        ((unsigned long long) content->size) / 1024ULL);
+            }
+            else if (content->size < (10 * 1024 * 1024)) {
+                float f = content->size;
+                f /= (1024 * 1024);
+                sprintf(sizebuf, "%1.2fM", f);
+            }
+            else if (content->size < (1024 * 1024 * 1024)) {
+                sprintf(sizebuf, "%4lluM",
+                        ((unsigned long long) content->size) /
+                        (1024ULL * 1024ULL));
+            }
+            else {
+                float f = (content->size / 1024);
+                f /= (1024 * 1024);
+                sprintf(sizebuf, "%1.2fG", f);
+            }
+            printf("%-50s  %s  %s", content->key, timebuf, sizebuf);
+            if (data->allDetails) {
+                printf("  %-34s  %-64s  %-12s",
+                       content->eTag,
+                       content->ownerId ? content->ownerId : "",
+                       content->ownerDisplayName ?
+                       content->ownerDisplayName : "");
+            }
+            printf("\n");
+        }
+    }
+
+    data->keyCount += contentsCount;
+
+    for (i = 0; i < commonPrefixesCount; i++) {
+        printf("\nCommon Prefix: %s\n", commonPrefixes[i]);
+    }
+
+    return S3StatusOK;
+}
+
+
+static void list_bucket(const char *bucketName, const char *prefix,
+                        const char *marker, const char *delimiter,
+                        int maxkeys, int allDetails)
+{
+    S3_init();
+
+    S3BucketContext bucketContext =
+    {
+        0,
+        bucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+
+    S3ListBucketHandler listBucketHandler =
+    {
+        { &responsePropertiesCallback, &responseCompleteCallback },
+        &listBucketCallback
+    };
+
+    list_bucket_callback_data data;
+
+    if (marker) {
+        snprintf(data.nextMarker, sizeof(data.nextMarker), "%s", marker);
+    } else {
+        data.nextMarker[0] = 0;
+    }
+    data.keyCount = 0;
+    data.allDetails = allDetails;
+
+    do {
+        data.isTruncated = 0;
+        do {
+            S3_list_bucket(&bucketContext, prefix, data.nextMarker,
+                           delimiter, maxkeys, 0, timeoutMsG, &listBucketHandler, &data);
+        } while (S3_status_is_retryable(statusG) && should_retry());
+        if (statusG != S3StatusOK) {
+            break;
+        }
+    } while (data.isTruncated && (!maxkeys || (data.keyCount < maxkeys)));
+
+    if (statusG == S3StatusOK) {
+        if (!data.keyCount) {
+            printListBucketHeader(allDetails);
+        }
+    }
+    else {
+        printError();
+    }
+
+    S3_deinitialize();
+}
+
+
// Command dispatcher for "list": with no arguments lists the service (the
// account's buckets); otherwise parses optional prefix=/marker=/delimiter=/
// maxkeys=/allDetails= parameters, treating the first non-option argument
// as the bucket name, and lists that bucket's contents.
static void list(int argc, char **argv, int optindex)
{
    if (optindex == argc) {
        // No arguments at all: list the buckets owned by the account.
        list_service(0);
        return;
    }

    const char *bucketName = 0;

    const char *prefix = 0, *marker = 0, *delimiter = 0;
    int maxkeys = 0, allDetails = 0;
    while (optindex < argc) {
        char *param = argv[optindex++];

        if (!strncmp(param, PREFIX_PREFIX, PREFIX_PREFIX_LEN)) {
            prefix = &(param[PREFIX_PREFIX_LEN]);
        }
        else if (!strncmp(param, MARKER_PREFIX, MARKER_PREFIX_LEN)) {
            marker = &(param[MARKER_PREFIX_LEN]);
        }
        else if (!strncmp(param, DELIMITER_PREFIX, DELIMITER_PREFIX_LEN)) {
            delimiter = &(param[DELIMITER_PREFIX_LEN]);
        }
        else if (!strncmp(param, MAXKEYS_PREFIX, MAXKEYS_PREFIX_LEN)) {
            maxkeys = convertInt(&(param[MAXKEYS_PREFIX_LEN]), "maxkeys");
        }
        else if (!strncmp(param, ALL_DETAILS_PREFIX,
                          ALL_DETAILS_PREFIX_LEN)) {
            // Accept several truthy spellings; anything else leaves the
            // flag at 0.
            const char *ad = &(param[ALL_DETAILS_PREFIX_LEN]);
            if (!strcmp(ad, "true") || !strcmp(ad, "TRUE") ||
                !strcmp(ad, "yes") || !strcmp(ad, "YES") ||
                !strcmp(ad, "1")) {
                allDetails = 1;
            }
        }
        else if (!bucketName) {
            // First bare argument is the bucket name.
            bucketName = param;
        }
        else {
            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
            usageExit(stderr);
        }
    }

    if (bucketName) {
        list_bucket(bucketName, prefix, marker, delimiter, maxkeys,
                    allDetails);
    }
    else {
        list_service(allDetails);
    }
}
+
+
// Accumulated state for listMultipartCallback while paging through a
// ListMultipartUploads result set.
typedef struct list_multiparts_callback_data
{
    int isTruncated;               // nonzero if more pages of results remain
    char nextKeyMarker[1024];      // key marker to resume the next page from
    char nextUploadIdMarker[1024]; // upload-id marker to resume from
    int uploadCount;               // total uploads reported so far
    int allDetails;                // print extended owner/initiator columns
} list_multiparts_callback_data;
+
+
// Tracks the state of one multipart upload across its three phases:
// initiate, per-part upload, and commit.
typedef struct UploadManager{
    // Upload id assigned by the initiate-multipart-upload response
    // (heap copy made in initial_multipart_callback).
    char * upload_id;

    // Per-part ETags, indexed by part sequence number minus one; filled in
    // as parts complete (or recovered via listPartsCallback on resume).
    char **etags;
    int next_etags_pos;  // index of the next etag slot to fill

    // Commit phase: the CompleteMultipartUpload XML body, and the number of
    // bytes of it not yet streamed out by multipartPutXmlCallback.
    growbuffer *gb;
    int remaining;
} UploadManager;
+
+
// Accumulated state for listPartsCallback while paging through a ListParts
// result set.  Also used in "noPrint" mode to harvest part ETags into an
// UploadManager when resuming an interrupted multipart upload.
typedef struct list_parts_callback_data
{
    int isTruncated;                // nonzero if more pages remain
    char nextPartNumberMarker[24];  // part-number marker to resume from
    char initiatorId[256];
    char initiatorDisplayName[256];
    char ownerId[256];
    char ownerDisplayName[256];
    char storageClass[256];
    int partsCount;                 // total parts reported so far
    int handlePartsStart;           // base index of the current page's parts
    int allDetails;                 // print extended columns (unused in print path)
    int noPrint;                    // nonzero: collect into manager, print nothing
    UploadManager *manager;         // target for collected ETags (noPrint mode)
} list_parts_callback_data;
+
+
// Carries only the upload id of the upload being aborted.
// NOTE(review): the struct tag says "list_upload" but the typedef name is
// abort_upload_callback_data — the mismatch is harmless but confusing.
typedef struct list_upload_callback_data
{
    char uploadId[1024];
} abort_upload_callback_data;
+
// Placeholder: multipart listings currently print no column header.  The
// parameter is accepted (and ignored) so call sites can mirror the shape of
// printListBucketHeader().
static void printListMultipartHeader(int allDetails)
{
    (void) allDetails;  // intentionally unused
}
+
+
+
// Prints the column header (and underline row) for the parts listing
// produced by listPartsCallback.
static void printListPartsHeader()
{
    printf("%-25s  %-30s  %-30s   %-15s\n",
           "LastModified", "PartNumber", "ETag", "SIZE");
    printf("---------------------  "
           "    -------------    "
           "-------------------------------  "
           "               -----\n");
}
+
+
// S3ListMultipartUploadsHandler callback: records paging state (truncation
// flag and resume markers) in the callback data and prints each in-progress
// multipart upload.  Always returns S3StatusOK so paging continues.
static S3Status listMultipartCallback(int isTruncated, const char *nextKeyMarker,
                                   const char *nextUploadIdMarker,
                                   int uploadsCount,
                                   const S3ListMultipartUpload *uploads,
                                   int commonPrefixesCount,
                                   const char **commonPrefixes,
                                   void *callbackData)
{
    list_multiparts_callback_data *data =
        (list_multiparts_callback_data *) callbackData;

    data->isTruncated = isTruncated;
    /*
    // This is tricky.  S3 doesn't return the NextMarker if there is no
    // delimiter.  Why, I don't know, since it's still useful for paging
    // through results.  We want NextMarker to be the last content in the
    // list, so set it to that if necessary.
    if ((!nextKeyMarker || !nextKeyMarker[0]) && uploadsCount) {
        nextKeyMarker = uploads[uploadsCount - 1].key;
    }*/
    // Save (or clear) the resume markers so the caller can request the next
    // page of results.
    if (nextKeyMarker) {
        snprintf(data->nextKeyMarker, sizeof(data->nextKeyMarker), "%s",
                 nextKeyMarker);
    }
    else {
        data->nextKeyMarker[0] = 0;
    }

    if (nextUploadIdMarker) {
        snprintf(data->nextUploadIdMarker, sizeof(data->nextUploadIdMarker), "%s",
                 nextUploadIdMarker);
    }
    else {
        data->nextUploadIdMarker[0] = 0;
    }

    // Print the header only once, before the first upload of the listing.
    if (uploadsCount && !data->uploadCount) {
        printListMultipartHeader(data->allDetails);
    }

    int i;
    for (i = 0; i < uploadsCount; i++) {
        const S3ListMultipartUpload *upload = &(uploads[i]);
        char timebuf[256];
        // NOTE(review): the hard-coded 1 always selects the verbose
        // multi-line format; the compact one-line branch below is dead code.
        if (1) {
            time_t t = (time_t) upload->initiated;
            strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
                     gmtime(&t));
            printf("\nKey: %s\n", upload->key);
            printf("Initiated: %s\n", timebuf);
            printf("UploadId: %s\n", upload->uploadId);
            // Owner/initiator fields are optional; print only if present.
            if (upload->initiatorId) {
                printf("Initiator ID: %s\n", upload->initiatorId);
            }
            if (upload->initiatorDisplayName) {
                printf("Initiator Display Name: %s\n", upload->initiatorDisplayName);
            }
            if (upload->ownerId) {
                printf("Owner ID: %s\n", upload->ownerId);
            }
            if (upload->ownerDisplayName) {
                printf("Owner Display Name: %s\n", upload->ownerDisplayName);
            }
            printf("StorageClass: %s\n", upload->storageClass);
        }
        else {
            // Compact one-line format (currently unreachable, see above).
            time_t t = (time_t) upload->initiated;
            strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
                     gmtime(&t));
            printf("%-50s  %s %-50s", upload->key, timebuf, upload->uploadId);
            if (data->allDetails) {
                printf("  %-34s  %-64s  %-12s  %-64s  %-12s",
                       upload->storageClass,
                       upload->ownerId ? upload->ownerId : "",
                       upload->ownerDisplayName ?
                       upload->ownerDisplayName : "",
                       upload->initiatorId ? upload->initiatorId : "",
                       upload->initiatorDisplayName ?
                       upload->initiatorDisplayName : "");
            }
            printf("\n");
        }
    }

    data->uploadCount += uploadsCount;

    for (i = 0; i < commonPrefixesCount; i++) {
        printf("\nCommon Prefix: %s\n", commonPrefixes[i]);
    }

    return S3StatusOK;
}
+
+
// S3ListPartsHandler callback: captures paging and ownership metadata in the
// callback data, then either prints each part, or — in noPrint mode — records
// each part's ETag into the UploadManager (used when resuming an interrupted
// multipart upload).  Always returns S3StatusOK.
static S3Status listPartsCallback(int isTruncated,
                                  const char *nextPartNumberMarker,
                                  const char *initiatorId,
                                  const char *initiatorDisplayName,
                                  const char *ownerId,
                                  const char *ownerDisplayName,
                                  const char *storageClass,
                                  int partsCount,
                                  int handlePartsStart,
                                  const S3ListPart *parts,
                                  void *callbackData)
{
    list_parts_callback_data *data =
        (list_parts_callback_data *) callbackData;

    data->isTruncated = isTruncated;
    data->handlePartsStart = handlePartsStart;
    // manager is only dereferenced in the noPrint branch; presumably every
    // caller that sets noPrint also supplies a manager — verify at call sites.
    UploadManager *manager = data->manager;
    /*
    // This is tricky.  S3 doesn't return the NextMarker if there is no
    // delimiter.  Why, I don't know, since it's still useful for paging
    // through results.  We want NextMarker to be the last content in the
    // list, so set it to that if necessary.
    if ((!nextKeyMarker || !nextKeyMarker[0]) && uploadsCount) {
        nextKeyMarker = uploads[uploadsCount - 1].key;
    }*/
    // Copy each optional response field into fixed buffers, clearing the
    // buffer when the field was absent.
    if (nextPartNumberMarker) {
        snprintf(data->nextPartNumberMarker,
                 sizeof(data->nextPartNumberMarker), "%s",
                 nextPartNumberMarker);
    }
    else {
        data->nextPartNumberMarker[0] = 0;
    }

    if (initiatorId) {
        snprintf(data->initiatorId, sizeof(data->initiatorId), "%s",
                 initiatorId);
    }
    else {
        data->initiatorId[0] = 0;
    }

    if (initiatorDisplayName) {
        snprintf(data->initiatorDisplayName,
                 sizeof(data->initiatorDisplayName), "%s",
                 initiatorDisplayName);
    }
    else {
        data->initiatorDisplayName[0] = 0;
    }

    if (ownerId) {
        snprintf(data->ownerId, sizeof(data->ownerId), "%s",
                 ownerId);
    }
    else {
        data->ownerId[0] = 0;
    }

    if (ownerDisplayName) {
        snprintf(data->ownerDisplayName, sizeof(data->ownerDisplayName), "%s",
                 ownerDisplayName);
    }
    else {
        data->ownerDisplayName[0] = 0;
    }

    if (storageClass) {
        snprintf(data->storageClass, sizeof(data->storageClass), "%s",
                 storageClass);
    }
    else {
        data->storageClass[0] = 0;
    }

    // Print the header once, before the first printed part.
    if (partsCount && !data->partsCount && !data->noPrint) {
        printListPartsHeader();
    }

    int i;
    for (i = 0; i < partsCount; i++) {
        const S3ListPart *part = &(parts[i]);
        char timebuf[256];
        if (data->noPrint) {
            // Harvest the part's ETag for a later CompleteMultipartUpload.
            // NOTE(review): remaining is int while part->size is 64-bit;
            // assumes total remaining fits in int — confirm.
            manager->etags[handlePartsStart+i] = strdup(part->eTag);
            manager->next_etags_pos++;
            manager->remaining = manager->remaining - part->size;
        } else {
            time_t t = (time_t) part->lastModified;
            strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
                     gmtime(&t));
            printf("%-30s", timebuf);
            printf("%-15llu", (unsigned long long) part->partNumber);
            printf("%-45s", part->eTag);
            printf("%-15llu\n", (unsigned long long) part->size);
        }
    }

    data->partsCount += partsCount;

    return S3StatusOK;
}
+
+
// Command handler for "listmultiparts": parses optional prefix=/keymarker=/
// delimiter=/encoding-type=/upload-id-marker=/maxuploads=/allDetails=
// parameters (first bare argument is the bucket name) and pages through the
// bucket's in-progress multipart uploads.
static void list_multipart_uploads(int argc, char **argv, int optindex)
{
    if (optindex == argc) {
        fprintf(stderr, "\nERROR: Usage: listmultiparts <bucket name>\n");
        return;
    }
    const char *bucketName = 0;

    const char *prefix = 0, *keymarker = 0, *delimiter = 0;
    const char *encodingtype = 0, *uploadidmarker = 0;
    int maxuploads = 0, allDetails = 0;
    while (optindex < argc) {
        char *param = argv[optindex++];
        if (!strncmp(param, PREFIX_PREFIX, PREFIX_PREFIX_LEN)) {
            prefix = &(param[PREFIX_PREFIX_LEN]);
        }
        else if (!strncmp(param, KEY_MARKER_PREFIX, KEY_MARKER_PREFIX_LEN)) {
            keymarker = &(param[KEY_MARKER_PREFIX_LEN]);
        }
        else if (!strncmp(param, DELIMITER_PREFIX, DELIMITER_PREFIX_LEN)) {
            delimiter = &(param[DELIMITER_PREFIX_LEN]);
        }
        else if (!strncmp(param, ENCODING_TYPE_PREFIX,
                          ENCODING_TYPE_PREFIX_LEN)) {
            encodingtype = &(param[ENCODING_TYPE_PREFIX_LEN]);
        }
        else if (!strncmp(param, UPLOAD_ID_MARKER_PREFIX,
                          UPLOAD_ID_MARKER_PREFIX_LEN)) {
            uploadidmarker = &(param[UPLOAD_ID_MARKER_PREFIX_LEN]);
        }
        else if (!strncmp(param, MAX_UPLOADS_PREFIX, MAX_UPLOADS_PREFIX_LEN)) {
            maxuploads = convertInt(&(param[MAX_UPLOADS_PREFIX_LEN]),
                                    "maxuploads");
        }
        else if (!strncmp(param, ALL_DETAILS_PREFIX, ALL_DETAILS_PREFIX_LEN)) {
            // Accept several truthy spellings.
            const char *ad = &(param[ALL_DETAILS_PREFIX_LEN]);
            if (!strcmp(ad, "true") || !strcmp(ad, "TRUE") ||
                !strcmp(ad, "yes") || !strcmp(ad, "YES") ||
                !strcmp(ad, "1")) {
                allDetails = 1;
            }
        }
        else if (!bucketName) {
            bucketName = param;
        }

        else {
            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
            usageExit(stderr);
        }
    }
    if (bucketName) {

        S3_init();

        S3BucketContext bucketContext =
        {
            0,
            bucketName,
            protocolG,
            uriStyleG,
            accessKeyIdG,
            secretAccessKeyG,
            0,
            awsRegionG
        };

        S3ListMultipartUploadsHandler listMultipartUploadsHandler =
        {
            { &responsePropertiesCallback, &responseCompleteCallback },
            &listMultipartCallback
        };

        list_multiparts_callback_data data;

        // Zero the state, then seed the resume markers from the command
        // line; listMultipartCallback advances them per page.
        memset(&data, 0, sizeof(list_multiparts_callback_data));
        if (keymarker != 0) {
            snprintf(data.nextKeyMarker, sizeof(data.nextKeyMarker), "%s",
                     keymarker);
        }
        if (uploadidmarker != 0) {
            snprintf(data.nextUploadIdMarker, sizeof(data.nextUploadIdMarker),
                     "%s", uploadidmarker);
        }
        data.uploadCount = 0;
        data.allDetails = allDetails;

        // Outer loop: pages.  Inner loop: retry transient failures.
        do {
            data.isTruncated = 0;
            do {
                S3_list_multipart_uploads(&bucketContext, prefix,
                                          data.nextKeyMarker,
                                          data.nextUploadIdMarker, encodingtype,
                                          delimiter, maxuploads, 0,
                                          timeoutMsG,
                                          &listMultipartUploadsHandler, &data);
            } while (S3_status_is_retryable(statusG) && should_retry());
            if (statusG != S3StatusOK) {
                break;
            }
        } while (data.isTruncated &&
                 (!maxuploads || (data.uploadCount < maxuploads)));

        if (statusG == S3StatusOK) {
            if (!data.uploadCount) {
                // Empty result: still emit the (currently empty) header.
                printListMultipartHeader(data.allDetails);
            }
        }
        else {
            printError();
        }

        S3_deinitialize();
    }
}
+
+
// Command handler for "listparts": splits the first argument into bucket/key,
// parses uploadId=/part-number-marker=/encoding-type=/max-parts=/filename=/
// allDetails= parameters, and pages through the uploaded parts of the given
// multipart upload, printing each one.
static void list_parts(int argc, char **argv, int optindex)
{
    if (optindex == argc) {
        fprintf(stderr, "\nERROR: Usage: listparts <bucket name> <filename> "
                "<upload-id>\n");
        return;
    }

    // Split bucket/key
    char *slash = argv[optindex];
    while (*slash && (*slash != '/')) {
        slash++;
    }
    if (!*slash || !*(slash + 1)) {
        fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n",
                argv[optindex]);
        usageExit(stderr);
    }
    // Terminate the bucket name in place; key points just past the slash.
    *slash++ = 0;

    const char *bucketName = argv[optindex++];
    const char *key = slash;
    const char *uploadid = 0, *partnumbermarker = 0;
    const char *encodingtype = 0;
    int allDetails = 0, maxparts = 0;
    while (optindex < argc) {
        char *param = argv[optindex++];
        if (!strncmp(param, UPLOAD_ID_PREFIX, UPLOAD_ID_PREFIX_LEN)) {
            uploadid = &(param[UPLOAD_ID_PREFIX_LEN]);
        }
        else if (!strncmp(param, PART_NUMBER_MARKER_PREFIX,
                          PART_NUMBER_MARKER_PREFIX_LEN)) {
            partnumbermarker = &(param[PART_NUMBER_MARKER_PREFIX_LEN]);
        }
        else if (!strncmp(param, ENCODING_TYPE_PREFIX,
                          ENCODING_TYPE_PREFIX_LEN)) {
            encodingtype = &(param[ENCODING_TYPE_PREFIX_LEN]);
        }
        else if (!strncmp(param, MAX_PARTS_PREFIX, MAX_PARTS_PREFIX_LEN)) {
            maxparts = convertInt(&(param[MAX_PARTS_PREFIX_LEN]), "max-parts");
        }
        else if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
            // filename= overrides the key taken from bucket/key.
            key = &(param[FILENAME_PREFIX_LEN]);
        }
        else if (!strncmp(param, ALL_DETAILS_PREFIX,
                          ALL_DETAILS_PREFIX_LEN)) {
            const char *ad = &(param[ALL_DETAILS_PREFIX_LEN]);
            if (!strcmp(ad, "true") || !strcmp(ad, "TRUE") ||
                !strcmp(ad, "yes") || !strcmp(ad, "YES") ||
                !strcmp(ad, "1")) {
                allDetails = 1;
            }
        }
        else {
            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
            usageExit(stderr);
        }
    }
    if (bucketName) {

        S3_init();

        S3BucketContext bucketContext =
        {
            0,
            bucketName,
            protocolG,
            uriStyleG,
            accessKeyIdG,
            secretAccessKeyG,
            0,
            awsRegionG
        };

        S3ListPartsHandler listPartsHandler =
        {
            { &responsePropertiesCallback, &responseCompleteCallback },
            &listPartsCallback
        };

        list_parts_callback_data data;

        memset(&data, 0, sizeof(list_parts_callback_data));
        if (partnumbermarker != 0) {
            snprintf(data.nextPartNumberMarker,
                     sizeof(data.nextPartNumberMarker), "%s", partnumbermarker);
        }

        data.partsCount = 0;
        data.allDetails = allDetails;
        data.noPrint = 0;  // print mode: listPartsCallback reports each part

        // Outer loop: pages.  Inner loop: retry transient failures.
        do {
            data.isTruncated = 0;
            do {
                S3_list_parts(&bucketContext, key, data.nextPartNumberMarker,
                                uploadid, encodingtype,
                                maxparts,
                               0, timeoutMsG,
                               &listPartsHandler, &data);
            } while (S3_status_is_retryable(statusG) && should_retry());
            if (statusG != S3StatusOK) {
                break;
            }
        } while (data.isTruncated &&
                 (!maxparts || (data.partsCount < maxparts)));

        if (statusG == S3StatusOK) {
            if (!data.partsCount) {
                printListMultipartHeader(data.allDetails);
            }
        }
        else {
            printError();
        }

        S3_deinitialize();
    }
}
+
+
+static void abort_multipart_upload(int argc, char **argv, int optindex)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Usage: abortmultipartupload <bucket name> "
+                "<upload-id>\n");
+        return;
+    }
+
+    // Split bucket/key
+    char *slash = argv[optindex];
+    while (*slash && (*slash != '/')) {
+        slash++;
+    }
+    if (!*slash || !*(slash + 1)) {
+        fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n",
+                argv[optindex]);
+        usageExit(stderr);
+    }
+    *slash++ = 0;
+
+    const char *bucketName = argv[optindex++];
+    const char *key = slash;
+    const char *uploadid = 0;
+    while (optindex < argc) {
+        char *param = argv[optindex++];
+        if (!strncmp(param, UPLOAD_ID_PREFIX, UPLOAD_ID_PREFIX_LEN)) {
+            uploadid = &(param[UPLOAD_ID_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+            key = &(param[FILENAME_PREFIX_LEN]);
+        }
+        else {
+            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+            usageExit(stderr);
+        }
+    }
+    if (bucketName) {
+
+        S3_init();
+
+        S3BucketContext bucketContext =
+        {
+            0,
+            bucketName,
+            protocolG,
+            uriStyleG,
+            accessKeyIdG,
+            secretAccessKeyG,
+            0,
+            awsRegionG
+        };
+
+        S3AbortMultipartUploadHandler abortMultipartUploadHandler =
+        {
+            { &responsePropertiesCallback, &responseCompleteCallback },
+        };
+
+        /*
+        list_multiparts_callback_data data;
+
+        memset(&data, 0, sizeof(list_multiparts_callback_data));
+        if (keymarker != 0) {
+            snprintf(data.nextKeyMarker, sizeof(data.nextKeyMarker), "%s",
+                     keymarker);
+        }
+        if (uploadidmarker != 0) {
+            snprintf(data.nextUploadIdMarker, sizeof(data.nextUploadIdMarker),
+                     "%s", uploadidmarker);
+        }
+
+        data.uploadCount = 0;
+        data.allDetails = allDetails;
+        */
+
+        do {
+            S3_abort_multipart_upload(&bucketContext, key, uploadid,
+                           timeoutMsG, &abortMultipartUploadHandler);
+        } while (S3_status_is_retryable(statusG) && should_retry());
+
+        S3_deinitialize();
+    }
+}
+
+
+// delete object -------------------------------------------------------------
+
+static void delete_object(int argc, char **argv, int optindex)
+{
+    (void) argc;
+
+    // Split bucket/key
+    char *slash = argv[optindex];
+
+    // We know there is a slash in there, put_object is only called if so
+    while (*slash && (*slash != '/')) {
+        slash++;
+    }
+    *slash++ = 0;
+
+    const char *bucketName = argv[optindex++];
+    const char *key = slash;
+
+    S3_init();
+
+    S3BucketContext bucketContext =
+    {
+        0,
+        bucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+
+    S3ResponseHandler responseHandler =
+    {
+        0,
+        &responseCompleteCallback
+    };
+
+    do {
+        S3_delete_object(&bucketContext, key, 0, timeoutMsG, &responseHandler, 0);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    if ((statusG != S3StatusOK) &&
+        (statusG != S3StatusErrorPreconditionFailed)) {
+        printError();
+    }
+
+    S3_deinitialize();
+}
+
+
+// put object ----------------------------------------------------------------
+
// State shared with putObjectDataCallback while streaming object data to the
// server, either from a file or from an in-memory growbuffer.
typedef struct put_object_callback_data
{
    FILE *infile;        // data source when reading from a file (else NULL)
    growbuffer *gb;      // data source when reading from memory (else NULL)
    // Bytes left / originally declared for the current request.
    uint64_t contentLength, originalContentLength;
    // Bytes left / originally declared across the whole (multipart) upload.
    uint64_t totalContentLength, totalOriginalContentLength;
    int noStatus;        // nonzero suppresses the progress printout
} put_object_callback_data;
+
+
// S3PutObjectDataCallback: supplies up to bufferSize bytes of object data
// from the growbuffer (if set) or the input file, decrements the remaining
// byte counters, and optionally prints progress.  Returns the number of
// bytes written into buffer; 0 signals end of data.
static int putObjectDataCallback(int bufferSize, char *buffer,
                                 void *callbackData)
{
    put_object_callback_data *data =
        (put_object_callback_data *) callbackData;

    int ret = 0;

    if (data->contentLength) {
        // Never read past the declared remaining content length.
        int toRead = ((data->contentLength > (unsigned) bufferSize) ?
                      (unsigned) bufferSize : data->contentLength);
        if (data->gb) {
            growbuffer_read(&(data->gb), toRead, &ret, buffer);
        }
        else if (data->infile) {
            // fread may return fewer bytes than requested; ret reflects the
            // actual count.
            ret = fread(buffer, 1, toRead, data->infile);
        }
    }

    data->contentLength -= ret;
    data->totalContentLength -= ret;

    // Progress report, unless suppressed or this was the final chunk.
    if (data->contentLength && !data->noStatus) {
        // Avoid a weird bug in MingW, which won't print the second integer
        // value properly when it's in the same call, so print separately
        printf("%llu bytes remaining ",
               (unsigned long long) data->totalContentLength);
        printf("(%d%% complete) ...\n",
               (int) (((data->totalOriginalContentLength -
                        data->totalContentLength) * 100) /
                      data->totalOriginalContentLength));
    }

    return ret;
}
+
// Size of each part of a multipart upload.
#define MULTIPART_CHUNK_SIZE (15 << 20) // multipart is 15M

// Per-part upload state: embeds the plain put-object callback data and adds
// the 1-based part sequence number plus the shared UploadManager.
typedef struct MultipartPartData {
    put_object_callback_data put_object_data;
    int seq;                  // 1-based part number of this part
    UploadManager *manager;   // shared upload state (etags, upload id)
} MultipartPartData;
+
+
+S3Status initial_multipart_callback(const char * upload_id,
+                                    void * callbackData)
+{
+    UploadManager *manager = (UploadManager *) callbackData;
+    manager->upload_id = strdup(upload_id);
+    return S3StatusOK;
+}
+
+
+S3Status MultipartResponseProperiesCallback
+    (const S3ResponseProperties *properties, void *callbackData)
+{
+    responsePropertiesCallback(properties, callbackData);
+    MultipartPartData *data = (MultipartPartData *) callbackData;
+    int seq = data->seq;
+    const char *etag = properties->eTag;
+    data->manager->etags[seq - 1] = strdup(etag);
+    data->manager->next_etags_pos = seq;
+    return S3StatusOK;
+}
+
+
+static int multipartPutXmlCallback(int bufferSize, char *buffer,
+                                   void *callbackData)
+{
+    UploadManager *manager = (UploadManager*)callbackData;
+    int ret = 0;
+    if (manager->remaining) {
+        int toRead = ((manager->remaining > bufferSize) ?
+                      bufferSize : manager->remaining);
+        growbuffer_read(&(manager->gb), toRead, &ret, buffer);
+    }
+    manager->remaining -= ret;
+    return ret;
+}
+
+
// Fetches the already-uploaded parts of an in-progress multipart upload and,
// via listPartsCallback in noPrint mode, loads their ETags into manager and
// reduces manager->remaining — used to resume an interrupted upload.
// Returns 0 on success, -1 on error (after printing the error).
static int try_get_parts_info(const char *bucketName, const char *key,
                              UploadManager *manager)
{
    S3BucketContext bucketContext =
    {
        0,
        bucketName,
        protocolG,
        uriStyleG,
        accessKeyIdG,
        secretAccessKeyG,
        0,
        awsRegionG
    };

    S3ListPartsHandler listPartsHandler =
    {
        { &responsePropertiesCallback, &responseCompleteCallback },
        &listPartsCallback
    };

    list_parts_callback_data data;

    memset(&data, 0, sizeof(list_parts_callback_data));

    data.partsCount = 0;
    data.allDetails = 0;
    data.manager = manager;
    data.noPrint = 1;  // collect ETags into manager instead of printing
    // Outer loop: pages.  Inner loop: retry transient failures.
    do {
        data.isTruncated = 0;
        do {
            S3_list_parts(&bucketContext, key, data.nextPartNumberMarker,
                          manager->upload_id, 0, 0, 0, timeoutMsG, &listPartsHandler,
                          &data);
        } while (S3_status_is_retryable(statusG) && should_retry());
        if (statusG != S3StatusOK) {
            break;
        }
    } while (data.isTruncated);

    if (statusG == S3StatusOK) {
        if (!data.partsCount) {
            printListMultipartHeader(data.allDetails);
        }
    }
    else {
        printError();
        return -1;
    }

    return 0;
}
+
+
+static void put_object(int argc, char **argv, int optindex,
+                       const char *srcBucketName, const char *srcKey, unsigned long long srcSize)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n");
+        usageExit(stderr);
+    }
+
+    // Split bucket/key
+    char *slash = argv[optindex];
+    while (*slash && (*slash != '/')) {
+        slash++;
+    }
+    if (!*slash || !*(slash + 1)) {
+        fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n",
+                argv[optindex]);
+        usageExit(stderr);
+    }
+    *slash++ = 0;
+
+    const char *bucketName = argv[optindex++];
+    const char *key = slash;
+    const char *uploadId = 0;
+    const char *filename = 0;
+    uint64_t contentLength = 0;
+    const char *cacheControl = 0, *contentType = 0, *md5 = 0;
+    const char *contentDispositionFilename = 0, *contentEncoding = 0;
+    int64_t expires = -1;
+    S3CannedAcl cannedAcl = S3CannedAclPrivate;
+    int metaPropertiesCount = 0;
+    S3NameValue metaProperties[S3_MAX_METADATA_COUNT];
+    char useServerSideEncryption = 0;
+    int noStatus = 0;
+
+    while (optindex < argc) {
+        char *param = argv[optindex++];
+        if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+            filename = &(param[FILENAME_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, CONTENT_LENGTH_PREFIX,
+                          CONTENT_LENGTH_PREFIX_LEN)) {
+            contentLength = convertInt(&(param[CONTENT_LENGTH_PREFIX_LEN]),
+                                       "contentLength");
+            if (contentLength > (5LL * 1024 * 1024 * 1024)) {
+                fprintf(stderr, "\nERROR: contentLength must be no greater "
+                        "than 5 GB\n");
+                usageExit(stderr);
+            }
+        }
+        else if (!strncmp(param, CACHE_CONTROL_PREFIX,
+                          CACHE_CONTROL_PREFIX_LEN)) {
+            cacheControl = &(param[CACHE_CONTROL_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, CONTENT_TYPE_PREFIX,
+                          CONTENT_TYPE_PREFIX_LEN)) {
+            contentType = &(param[CONTENT_TYPE_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, MD5_PREFIX, MD5_PREFIX_LEN)) {
+            md5 = &(param[MD5_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, CONTENT_DISPOSITION_FILENAME_PREFIX,
+                          CONTENT_DISPOSITION_FILENAME_PREFIX_LEN)) {
+            contentDispositionFilename =
+                &(param[CONTENT_DISPOSITION_FILENAME_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, CONTENT_ENCODING_PREFIX,
+                          CONTENT_ENCODING_PREFIX_LEN)) {
+            contentEncoding = &(param[CONTENT_ENCODING_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, UPLOAD_ID_PREFIX,
+                          UPLOAD_ID_PREFIX_LEN)) {
+            uploadId = &(param[UPLOAD_ID_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, EXPIRES_PREFIX, EXPIRES_PREFIX_LEN)) {
+            expires = parseIso8601Time(&(param[EXPIRES_PREFIX_LEN]));
+            if (expires < 0) {
+                fprintf(stderr, "\nERROR: Invalid expires time "
+                        "value; ISO 8601 time format required\n");
+                usageExit(stderr);
+            }
+        }
+        else if (!strncmp(param, X_AMZ_META_PREFIX, X_AMZ_META_PREFIX_LEN)) {
+            if (metaPropertiesCount == S3_MAX_METADATA_COUNT) {
+                fprintf(stderr, "\nERROR: Too many x-amz-meta- properties, "
+                        "limit %lu: %s\n",
+                        (unsigned long) S3_MAX_METADATA_COUNT, param);
+                usageExit(stderr);
+            }
+            char *name = &(param[X_AMZ_META_PREFIX_LEN]);
+            char *value = name;
+            while (*value && (*value != '=')) {
+                value++;
+            }
+            if (!*value || !*(value + 1)) {
+                fprintf(stderr, "\nERROR: Invalid parameter: %s\n", param);
+                usageExit(stderr);
+            }
+            *value++ = 0;
+            metaProperties[metaPropertiesCount].name = name;
+            metaProperties[metaPropertiesCount++].value = value;
+        }
+        else if (!strncmp(param, USE_SERVER_SIDE_ENCRYPTION_PREFIX,
+                          USE_SERVER_SIDE_ENCRYPTION_PREFIX_LEN)) {
+            const char *val = &(param[USE_SERVER_SIDE_ENCRYPTION_PREFIX_LEN]);
+            if (!strcmp(val, "true") || !strcmp(val, "TRUE") ||
+                !strcmp(val, "yes") || !strcmp(val, "YES") ||
+                !strcmp(val, "1")) {
+                useServerSideEncryption = 1;
+            }
+            else {
+                useServerSideEncryption = 0;
+            }
+        }
+        else if (!strncmp(param, CANNED_ACL_PREFIX, CANNED_ACL_PREFIX_LEN)) {
+            char *val = &(param[CANNED_ACL_PREFIX_LEN]);
+            if (!strcmp(val, "private")) {
+                cannedAcl = S3CannedAclPrivate;
+            }
+            else if (!strcmp(val, "public-read")) {
+                cannedAcl = S3CannedAclPublicRead;
+            }
+            else if (!strcmp(val, "public-read-write")) {
+                cannedAcl = S3CannedAclPublicReadWrite;
+            }
+            else if (!strcmp(val, "authenticated-read")) {
+                cannedAcl = S3CannedAclAuthenticatedRead;
+            }
+            else {
+                fprintf(stderr, "\nERROR: Unknown canned ACL: %s\n", val);
+                usageExit(stderr);
+            }
+        }
+        else if (!strncmp(param, NO_STATUS_PREFIX, NO_STATUS_PREFIX_LEN)) {
+            const char *ns = &(param[NO_STATUS_PREFIX_LEN]);
+            if (!strcmp(ns, "true") || !strcmp(ns, "TRUE") ||
+                !strcmp(ns, "yes") || !strcmp(ns, "YES") ||
+                !strcmp(ns, "1")) {
+                noStatus = 1;
+            }
+        }
+        else {
+            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+            usageExit(stderr);
+        }
+    }
+
+    put_object_callback_data data;
+
+    data.infile = 0;
+    data.gb = 0;
+    data.noStatus = noStatus;
+
+    if (srcSize) {
+        // This is really a COPY multipart, not a put, so take from source object
+        contentLength = srcSize;
+        data.infile = NULL;
+    }
+    else if (filename) {
+        if (!contentLength) {
+            struct stat statbuf;
+            // Stat the file to get its length
+            if (stat(filename, &statbuf) == -1) {
+                fprintf(stderr, "\nERROR: Failed to stat file %s: ",
+                        filename);
+                perror(0);
+                exit(-1);
+            }
+            contentLength = statbuf.st_size;
+        }
+        // Open the file
+        if (!(data.infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) {
+            fprintf(stderr, "\nERROR: Failed to open input file %s: ",
+                    filename);
+            perror(0);
+            exit(-1);
+        }
+    }
+    else {
+        // Read from stdin.  If contentLength is not provided, we have
+        // to read it all in to get contentLength.
+        if (!contentLength) {
+            // Read all if stdin to get the data
+            char buffer[64 * 1024];
+            while (1) {
+                int amtRead = fread(buffer, 1, sizeof(buffer), stdin);
+                if (amtRead == 0) {
+                    break;
+                }
+                if (!growbuffer_append(&(data.gb), buffer, amtRead)) {
+                    fprintf(stderr, "\nERROR: Out of memory while reading "
+                            "stdin\n");
+                    exit(-1);
+                }
+                contentLength += amtRead;
+                if (amtRead < (int) sizeof(buffer)) {
+                    break;
+                }
+            }
+        }
+        else {
+            data.infile = stdin;
+        }
+    }
+
+    data.totalContentLength =
+    data.totalOriginalContentLength =
+    data.contentLength =
+    data.originalContentLength =
+            contentLength;
+
+    S3_init();
+
+    S3BucketContext bucketContext =
+    {
+        0,
+        bucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+
+    S3PutProperties putProperties =
+    {
+        contentType,
+        md5,
+        cacheControl,
+        contentDispositionFilename,
+        contentEncoding,
+        expires,
+        cannedAcl,
+        metaPropertiesCount,
+        metaProperties,
+        useServerSideEncryption
+    };
+
+    if (contentLength <= MULTIPART_CHUNK_SIZE) {
+        S3PutObjectHandler putObjectHandler =
+        {
+            { &responsePropertiesCallback, &responseCompleteCallback },
+            &putObjectDataCallback
+        };
+
+        do {
+            S3_put_object(&bucketContext, key, contentLength, &putProperties, 0,
+                          0, &putObjectHandler, &data);
+        } while (S3_status_is_retryable(statusG) && should_retry());
+
+        if (data.infile) {
+            fclose(data.infile);
+        }
+        else if (data.gb) {
+            growbuffer_destroy(data.gb);
+        }
+
+        if (statusG != S3StatusOK) {
+            printError();
+        }
+        else if (data.contentLength) {
+            fprintf(stderr, "\nERROR: Failed to read remaining %llu bytes from "
+                    "input\n", (unsigned long long) data.contentLength);
+        }
+    }
+    else {
+        uint64_t totalContentLength = contentLength;
+        uint64_t todoContentLength = contentLength;
+        UploadManager manager;
+        manager.upload_id = 0;
+        manager.gb = 0;
+
+        //div round up
+        int seq;
+        int totalSeq = ((contentLength + MULTIPART_CHUNK_SIZE- 1) /
+                        MULTIPART_CHUNK_SIZE);
+
+        MultipartPartData partData;
+        int partContentLength = 0;
+
+        S3MultipartInitialHandler handler = {
+            {
+                &responsePropertiesCallback,
+                &responseCompleteCallback
+            },
+            &initial_multipart_callback
+        };
+
+        S3PutObjectHandler putObjectHandler = {
+            {&MultipartResponseProperiesCallback, &responseCompleteCallback },
+            &putObjectDataCallback
+        };
+
+        S3MultipartCommitHandler commit_handler = {
+            {
+                &responsePropertiesCallback,&responseCompleteCallback
+            },
+            &multipartPutXmlCallback,
+            0
+        };
+
+        manager.etags = (char **) malloc(sizeof(char *) * totalSeq);
+        manager.next_etags_pos = 0;
+
+        if (uploadId) {
+            manager.upload_id = strdup(uploadId);
+            manager.remaining = contentLength;
+            if (!try_get_parts_info(bucketName, key, &manager)) {
+                fseek(data.infile, -(manager.remaining), 2);
+                contentLength = manager.remaining;
+                goto upload;
+            } else {
+                goto clean;
+            }
+        }
+
+        do {
+            S3_initiate_multipart(&bucketContext, key,0, &handler,0, timeoutMsG, &manager);
+        } while (S3_status_is_retryable(statusG) && should_retry());
+
+        if (manager.upload_id == 0 || statusG != S3StatusOK) {
+            printError();
+            goto clean;
+        }
+
+upload:
+        todoContentLength -= MULTIPART_CHUNK_SIZE * manager.next_etags_pos;
+        for (seq = manager.next_etags_pos + 1; seq <= totalSeq; seq++) {
+            memset(&partData, 0, sizeof(MultipartPartData));
+            partData.manager = &manager;
+            partData.seq = seq;
+            partData.put_object_data = data;
+            partContentLength = ((contentLength > MULTIPART_CHUNK_SIZE) ?
+                                 MULTIPART_CHUNK_SIZE : contentLength);
+            printf("%s Part Seq %d, length=%d\n", srcSize ? "Copying" : "Sending", seq, partContentLength);
+            partData.put_object_data.contentLength = partContentLength;
+            partData.put_object_data.originalContentLength = partContentLength;
+            partData.put_object_data.totalContentLength = todoContentLength;
+            partData.put_object_data.totalOriginalContentLength = totalContentLength;
+            putProperties.md5 = 0;
+            do {
+                if (srcSize) {
+                    S3BucketContext srcBucketContext =
+                    {
+                        0,
+                        srcBucketName,
+                        protocolG,
+                        uriStyleG,
+                        accessKeyIdG,
+                        secretAccessKeyG,
+                        0,
+                        awsRegionG
+                    };
+
+                    S3ResponseHandler copyResponseHandler = { &responsePropertiesCallback, &responseCompleteCallback };
+                    int64_t lastModified;
+
+                    unsigned long long startOffset = (unsigned long long)MULTIPART_CHUNK_SIZE * (unsigned long long)(seq-1);
+                    unsigned long long count = partContentLength - 1; // Inclusive for copies
+                    // The default copy callback tries to set this for us, need to allocate here
+                    manager.etags[seq-1] = malloc(512); // TBD - magic #!  Isa there a max etag defined?
+                    S3_copy_object_range(&srcBucketContext, srcKey,
+                                         bucketName, key,
+                                         seq, manager.upload_id,
+                                         startOffset, count,
+                                         &putProperties,
+                                         &lastModified, 512 /*TBD - magic # */,
+                                         manager.etags[seq-1], 0,
+                                         timeoutMsG,
+                                         &copyResponseHandler, 0);
+                } else {
+                    S3_upload_part(&bucketContext, key, &putProperties,
+                                   &putObjectHandler, seq, manager.upload_id,
+                                   partContentLength,
+                                   0, timeoutMsG,
+                                   &partData);
+                }
+            } while (S3_status_is_retryable(statusG) && should_retry());
+            if (statusG != S3StatusOK) {
+                printError();
+                goto clean;
+            }
+            contentLength -= MULTIPART_CHUNK_SIZE;
+            todoContentLength -= MULTIPART_CHUNK_SIZE;
+        }
+
+        int i;
+        int size = 0;
+        size += growbuffer_append(&(manager.gb), "<CompleteMultipartUpload>",
+                                  strlen("<CompleteMultipartUpload>"));
+        char buf[256];
+        int n;
+        for (i = 0; i < totalSeq; i++) {
+            n = snprintf(buf, sizeof(buf), "<Part><PartNumber>%d</PartNumber>"
+                         "<ETag>%s</ETag></Part>", i + 1, manager.etags[i]);
+            size += growbuffer_append(&(manager.gb), buf, n);
+        }
+        size += growbuffer_append(&(manager.gb), "</CompleteMultipartUpload>",
+                                  strlen("</CompleteMultipartUpload>"));
+        manager.remaining = size;
+
+        do {
+            S3_complete_multipart_upload(&bucketContext, key, &commit_handler,
+                                         manager.upload_id, manager.remaining,
+                                         0, timeoutMsG, &manager);
+        } while (S3_status_is_retryable(statusG) && should_retry());
+        if (statusG != S3StatusOK) {
+            printError();
+            goto clean;
+        }
+
+    clean:
+        if(manager.upload_id) {
+            free(manager.upload_id);
+        }
+        for (i = 0; i < manager.next_etags_pos; i++) {
+            free(manager.etags[i]);
+        }
+        growbuffer_destroy(manager.gb);
+        free(manager.etags);
+    }
+
+    S3_deinitialize();
+}
+
+
+// copy object ---------------------------------------------------------------
+static S3Status copyListKeyCallback(int isTruncated, const char *nextMarker,
+                                    int contentsCount,
+                                    const S3ListBucketContent *contents,
+                                    int commonPrefixesCount,
+                                    const char **commonPrefixes,
+                                    void *callbackData)
+{
+    unsigned long long *size = (unsigned long long *)callbackData;
+
+    // These are unused, avoid warnings in a hopefully portable way
+    (void)(nextMarker);
+    (void)(commonPrefixesCount);
+    (void)(commonPrefixes);
+    (void)(isTruncated);
+
+    if (contentsCount != 1) {
+        // We either have no matched or multiples...can't perform the operation
+        return S3StatusErrorUnexpectedContent;
+    }
+
+    *size = (unsigned long long) contents->size;
+    return S3StatusOK;
+}
+
+
+static void copy_object(int argc, char **argv, int optindex)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: source bucket/key\n");
+        usageExit(stderr);
+    }
+
+    // Split bucket/key
+    char *slash = argv[optindex];
+    while (*slash && (*slash != '/')) {
+        slash++;
+    }
+    if (!*slash || !*(slash + 1)) {
+        fprintf(stderr, "\nERROR: Invalid source bucket/key name: %s\n",
+                argv[optindex]);
+        usageExit(stderr);
+    }
+    *slash++ = 0;
+
+    const char *sourceBucketName = argv[optindex++];
+    const char *sourceKey = slash;
+    unsigned long long sourceSize = 0;
+
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: "
+                "destination bucket/key\n");
+        usageExit(stderr);
+    }
+
+    S3_init();
+    S3BucketContext listBucketContext =
+    {
+        0,
+        sourceBucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+    S3ListBucketHandler listBucketHandler =
+    {
+        { &responsePropertiesCallback, &responseCompleteCallback },
+        &copyListKeyCallback
+    };
+    // Find size of existing key to determine if MP required
+    do {
+        S3_list_bucket(&listBucketContext, sourceKey, NULL,
+                       ".", 1, 0,
+                       timeoutMsG, &listBucketHandler, &sourceSize);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+    if (statusG != S3StatusOK) {
+        fprintf(stderr, "\nERROR: Unable to get source object size (%s)\n",
+                S3_get_status_name(statusG));
+        fprintf(stderr, "%s\n", errorDetailsG);
+        exit(1);
+    }
+    if (sourceSize > MULTIPART_CHUNK_SIZE) {
+        printf("\nUsing multipart copy because object size %llu is above %d.\n", sourceSize, MULTIPART_CHUNK_SIZE);
+        put_object(argc, argv, optindex, sourceBucketName, sourceKey, sourceSize);
+        return;
+    }
+
+    // Split bucket/key
+    slash = argv[optindex];
+    while (*slash && (*slash != '/')) {
+        slash++;
+    }
+    if (!*slash || !*(slash + 1)) {
+        fprintf(stderr, "\nERROR: Invalid destination bucket/key name: %s\n",
+                argv[optindex]);
+        usageExit(stderr);
+    }
+    *slash++ = 0;
+
+    const char *destinationBucketName = argv[optindex++];
+    const char *destinationKey = slash;
+
+    const char *cacheControl = 0, *contentType = 0;
+    const char *contentDispositionFilename = 0, *contentEncoding = 0;
+    int64_t expires = -1;
+    S3CannedAcl cannedAcl = S3CannedAclPrivate;
+    int metaPropertiesCount = 0;
+    S3NameValue metaProperties[S3_MAX_METADATA_COUNT];
+    char useServerSideEncryption = 0;
+    int anyPropertiesSet = 0;
+
+    while (optindex < argc) {
+        char *param = argv[optindex++];
+        if (!strncmp(param, CACHE_CONTROL_PREFIX,
+                          CACHE_CONTROL_PREFIX_LEN)) {
+            cacheControl = &(param[CACHE_CONTROL_PREFIX_LEN]);
+            anyPropertiesSet = 1;
+        }
+        else if (!strncmp(param, CONTENT_TYPE_PREFIX,
+                          CONTENT_TYPE_PREFIX_LEN)) {
+            contentType = &(param[CONTENT_TYPE_PREFIX_LEN]);
+            anyPropertiesSet = 1;
+        }
+        else if (!strncmp(param, CONTENT_DISPOSITION_FILENAME_PREFIX,
+                          CONTENT_DISPOSITION_FILENAME_PREFIX_LEN)) {
+            contentDispositionFilename =
+                &(param[CONTENT_DISPOSITION_FILENAME_PREFIX_LEN]);
+            anyPropertiesSet = 1;
+        }
+        else if (!strncmp(param, CONTENT_ENCODING_PREFIX,
+                          CONTENT_ENCODING_PREFIX_LEN)) {
+            contentEncoding = &(param[CONTENT_ENCODING_PREFIX_LEN]);
+            anyPropertiesSet = 1;
+        }
+        else if (!strncmp(param, EXPIRES_PREFIX, EXPIRES_PREFIX_LEN)) {
+            expires = parseIso8601Time(&(param[EXPIRES_PREFIX_LEN]));
+            if (expires < 0) {
+                fprintf(stderr, "\nERROR: Invalid expires time "
+                        "value; ISO 8601 time format required\n");
+                usageExit(stderr);
+            }
+            anyPropertiesSet = 1;
+        }
+        else if (!strncmp(param, X_AMZ_META_PREFIX, X_AMZ_META_PREFIX_LEN)) {
+            if (metaPropertiesCount == S3_MAX_METADATA_COUNT) {
+                fprintf(stderr, "\nERROR: Too many x-amz-meta- properties, "
+                        "limit %lu: %s\n",
+                        (unsigned long) S3_MAX_METADATA_COUNT, param);
+                usageExit(stderr);
+            }
+            char *name = &(param[X_AMZ_META_PREFIX_LEN]);
+            char *value = name;
+            while (*value && (*value != '=')) {
+                value++;
+            }
+            if (!*value || !*(value + 1)) {
+                fprintf(stderr, "\nERROR: Invalid parameter: %s\n", param);
+                usageExit(stderr);
+            }
+            *value++ = 0;
+            metaProperties[metaPropertiesCount].name = name;
+            metaProperties[metaPropertiesCount++].value = value;
+            anyPropertiesSet = 1;
+        }
+        else if (!strncmp(param, USE_SERVER_SIDE_ENCRYPTION_PREFIX,
+                          USE_SERVER_SIDE_ENCRYPTION_PREFIX_LEN)) {
+            if (!strcmp(param, "true") || !strcmp(param, "TRUE") ||
+                !strcmp(param, "yes") || !strcmp(param, "YES") ||
+                !strcmp(param, "1")) {
+                useServerSideEncryption = 1;
+                anyPropertiesSet = 1;
+            }
+            else {
+                useServerSideEncryption = 0;
+            }
+        }
+        else if (!strncmp(param, CANNED_ACL_PREFIX, CANNED_ACL_PREFIX_LEN)) {
+            char *val = &(param[CANNED_ACL_PREFIX_LEN]);
+            if (!strcmp(val, "private")) {
+                cannedAcl = S3CannedAclPrivate;
+            }
+            else if (!strcmp(val, "public-read")) {
+                cannedAcl = S3CannedAclPublicRead;
+            }
+            else if (!strcmp(val, "public-read-write")) {
+                cannedAcl = S3CannedAclPublicReadWrite;
+            }
+            else if (!strcmp(val, "authenticated-read")) {
+                cannedAcl = S3CannedAclAuthenticatedRead;
+            }
+            else {
+                fprintf(stderr, "\nERROR: Unknown canned ACL: %s\n", val);
+                usageExit(stderr);
+            }
+            anyPropertiesSet = 1;
+        }
+        else {
+            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+            usageExit(stderr);
+        }
+    }
+
+    S3BucketContext bucketContext =
+    {
+        0,
+        sourceBucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+
+    S3PutProperties putProperties =
+    {
+        contentType,
+        0,
+        cacheControl,
+        contentDispositionFilename,
+        contentEncoding,
+        expires,
+        cannedAcl,
+        metaPropertiesCount,
+        metaProperties,
+        useServerSideEncryption
+    };
+
+    S3ResponseHandler responseHandler =
+    {
+        &responsePropertiesCallback,
+        &responseCompleteCallback
+    };
+
+    int64_t lastModified;
+    char eTag[256];
+
+    do {
+        S3_copy_object(&bucketContext, sourceKey, destinationBucketName,
+                       destinationKey, anyPropertiesSet ? &putProperties : 0,
+                       &lastModified, sizeof(eTag), eTag, 0,
+                       timeoutMsG,
+                       &responseHandler, 0);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    if (statusG == S3StatusOK) {
+        if (lastModified >= 0) {
+            char timebuf[256];
+            time_t t = (time_t) lastModified;
+            strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
+                     gmtime(&t));
+            printf("Last-Modified: %s\n", timebuf);
+        }
+        if (eTag[0]) {
+            printf("ETag: %s\n", eTag);
+        }
+    }
+    else {
+        printError();
+    }
+
+    S3_deinitialize();
+}
+
+
+// get object ----------------------------------------------------------------
+
+static S3Status getObjectDataCallback(int bufferSize, const char *buffer,
+                                      void *callbackData)
+{
+    FILE *outfile = (FILE *) callbackData;
+
+    size_t wrote = fwrite(buffer, 1, bufferSize, outfile);
+
+    return ((wrote < (size_t) bufferSize) ?
+            S3StatusAbortedByCallback : S3StatusOK);
+}
+
+
+static void get_object(int argc, char **argv, int optindex)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n");
+        usageExit(stderr);
+    }
+
+    // Split bucket/key
+    char *slash = argv[optindex];
+    while (*slash && (*slash != '/')) {
+        slash++;
+    }
+    if (!*slash || !*(slash + 1)) {
+        fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n",
+                argv[optindex]);
+        usageExit(stderr);
+    }
+    *slash++ = 0;
+
+    const char *bucketName = argv[optindex++];
+    const char *key = slash;
+
+    const char *filename = 0;
+    int64_t ifModifiedSince = -1, ifNotModifiedSince = -1;
+    const char *ifMatch = 0, *ifNotMatch = 0;
+    uint64_t startByte = 0, byteCount = 0;
+
+    while (optindex < argc) {
+        char *param = argv[optindex++];
+        if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+            filename = &(param[FILENAME_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, IF_MODIFIED_SINCE_PREFIX,
+                     IF_MODIFIED_SINCE_PREFIX_LEN)) {
+            // Parse ifModifiedSince
+            ifModifiedSince = parseIso8601Time
+                (&(param[IF_MODIFIED_SINCE_PREFIX_LEN]));
+            if (ifModifiedSince < 0) {
+                fprintf(stderr, "\nERROR: Invalid ifModifiedSince time "
+                        "value; ISO 8601 time format required\n");
+                usageExit(stderr);
+            }
+        }
+        else if (!strncmp(param, IF_NOT_MODIFIED_SINCE_PREFIX,
+                          IF_NOT_MODIFIED_SINCE_PREFIX_LEN)) {
+            // Parse ifModifiedSince
+            ifNotModifiedSince = parseIso8601Time
+                (&(param[IF_NOT_MODIFIED_SINCE_PREFIX_LEN]));
+            if (ifNotModifiedSince < 0) {
+                fprintf(stderr, "\nERROR: Invalid ifNotModifiedSince time "
+                        "value; ISO 8601 time format required\n");
+                usageExit(stderr);
+            }
+        }
+        else if (!strncmp(param, IF_MATCH_PREFIX, IF_MATCH_PREFIX_LEN)) {
+            ifMatch = &(param[IF_MATCH_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, IF_NOT_MATCH_PREFIX,
+                          IF_NOT_MATCH_PREFIX_LEN)) {
+            ifNotMatch = &(param[IF_NOT_MATCH_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, START_BYTE_PREFIX, START_BYTE_PREFIX_LEN)) {
+            startByte = convertInt
+                (&(param[START_BYTE_PREFIX_LEN]), "startByte");
+        }
+        else if (!strncmp(param, BYTE_COUNT_PREFIX, BYTE_COUNT_PREFIX_LEN)) {
+            byteCount = convertInt
+                (&(param[BYTE_COUNT_PREFIX_LEN]), "byteCount");
+        }
+        else {
+            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+            usageExit(stderr);
+        }
+    }
+
+    FILE *outfile = 0;
+
+    if (filename) {
+        // Stat the file, and if it doesn't exist, open it in w mode
+        struct stat buf;
+        if (stat(filename, &buf) == -1) {
+            outfile = fopen(filename, "w" FOPEN_EXTRA_FLAGS);
+        }
+        else {
+            // Open in r+ so that we don't truncate the file, just in case
+            // there is an error and we write no bytes, we leave the file
+            // unmodified
+            outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
+        }
+
+        if (!outfile) {
+            fprintf(stderr, "\nERROR: Failed to open output file %s: ",
+                    filename);
+            perror(0);
+            exit(-1);
+        }
+    }
+    else if (showResponsePropertiesG) {
+        fprintf(stderr, "\nERROR: get -s requires a filename parameter\n");
+        usageExit(stderr);
+    }
+    else {
+        outfile = stdout;
+    }
+
+    S3_init();
+
+    S3BucketContext bucketContext =
+    {
+        0,
+        bucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+
+    S3GetConditions getConditions =
+    {
+        ifModifiedSince,
+        ifNotModifiedSince,
+        ifMatch,
+        ifNotMatch
+    };
+
+    S3GetObjectHandler getObjectHandler =
+    {
+        { &responsePropertiesCallback, &responseCompleteCallback },
+        &getObjectDataCallback
+    };
+
+    do {
+        S3_get_object(&bucketContext, key, &getConditions, startByte,
+                      byteCount, 0, 0, &getObjectHandler, outfile);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    if (statusG != S3StatusOK) {
+        printError();
+    }
+
+    fclose(outfile);
+
+    S3_deinitialize();
+}
+
+
+// head object ---------------------------------------------------------------
+
+static void head_object(int argc, char **argv, int optindex)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n");
+        usageExit(stderr);
+    }
+
+    // Head implies showing response properties
+    showResponsePropertiesG = 1;
+
+    // Split bucket/key
+    char *slash = argv[optindex];
+
+    while (*slash && (*slash != '/')) {
+        slash++;
+    }
+    if (!*slash || !*(slash + 1)) {
+        fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n",
+                argv[optindex]);
+        usageExit(stderr);
+    }
+    *slash++ = 0;
+
+    const char *bucketName = argv[optindex++];
+    const char *key = slash;
+
+    if (optindex != argc) {
+        fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optindex]);
+        usageExit(stderr);
+    }
+
+    S3_init();
+
+    S3BucketContext bucketContext =
+    {
+        0,
+        bucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+
+    S3ResponseHandler responseHandler =
+    {
+        &responsePropertiesCallback,
+        &responseCompleteCallback
+    };
+
+    do {
+        S3_head_object(&bucketContext, key, 0, 0, &responseHandler, 0);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    if ((statusG != S3StatusOK) &&
+        (statusG != S3StatusErrorPreconditionFailed)) {
+        printError();
+    }
+
+    S3_deinitialize();
+}
+
+
+// generate query string ------------------------------------------------------
+
+static void generate_query_string(int argc, char **argv, int optindex)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: bucket[/key]\n");
+        usageExit(stderr);
+    }
+
+    const char *bucketName = argv[optindex];
+    const char *key = 0;
+
+    // Split bucket/key
+    char *slash = argv[optindex++];
+    while (*slash && (*slash != '/')) {
+        slash++;
+    }
+    if (*slash) {
+        *slash++ = 0;
+        key = slash;
+    }
+    else {
+        key = 0;
+    }
+
+    int expires = -1;
+
+    const char *resource = 0;
+    const char *httpMethod = "GET";
+
+    while (optindex < argc) {
+        char *param = argv[optindex++];
+        if (!strncmp(param, EXPIRES_PREFIX, EXPIRES_PREFIX_LEN)) {
+            expires = parseIso8601Time(&(param[EXPIRES_PREFIX_LEN]));
+            if (expires < 0) {
+                fprintf(stderr, "\nERROR: Invalid expires time "
+                        "value; ISO 8601 time format required\n");
+                usageExit(stderr);
+            }
+        }
+        else if (!strncmp(param, RESOURCE_PREFIX, RESOURCE_PREFIX_LEN)) {
+            resource = &(param[RESOURCE_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, HTTP_METHOD_PREFIX, HTTP_METHOD_PREFIX_LEN)) {
+            httpMethod = &(param[HTTP_METHOD_PREFIX_LEN]);
+        }
+        else {
+            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+            usageExit(stderr);
+        }
+    }
+
+    S3_init();
+
+    S3BucketContext bucketContext =
+    {
+        0,
+        bucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+
+    char buffer[S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE];
+
+    S3Status status = S3_generate_authenticated_query_string
+        (buffer, &bucketContext, key, expires, resource, httpMethod);
+
+    if (status != S3StatusOK) {
+        printf("Failed to generate authenticated query string: %s\n",
+               S3_get_status_name(status));
+    }
+    else {
+        printf("%s\n", buffer);
+    }
+
+    S3_deinitialize();
+}
+
+
+// get acl -------------------------------------------------------------------
+
// Fetches the ACL of bucket[/key] and prints it in the simple-ACL tabular
// format (an "OwnerID <id> <display-name>" line followed by one row per
// grant).  An optional "filename=..." parameter redirects the table to that
// file; otherwise it goes to stdout.  Uses the global credential/endpoint
// settings and the global retry/timeout policy.
void get_acl(int argc, char **argv, int optindex)
{
    if (optindex == argc) {
        fprintf(stderr, "\nERROR: Missing parameter: bucket[/key]\n");
        usageExit(stderr);
    }

    const char *bucketName = argv[optindex];
    const char *key = 0;

    // Split bucket/key: terminate the bucket name in place at the first '/',
    // leaving key NULL when the argument names a bucket only.
    char *slash = argv[optindex++];
    while (*slash && (*slash != '/')) {
        slash++;
    }
    if (*slash) {
        *slash++ = 0;
        key = slash;
    }
    else {
        key = 0;
    }

    const char *filename = 0;

    // Remaining parameters: only "filename=..." is recognized.
    while (optindex < argc) {
        char *param = argv[optindex++];
        if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
            filename = &(param[FILENAME_PREFIX_LEN]);
        }
        else {
            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
            usageExit(stderr);
        }
    }

    FILE *outfile = 0;

    if (filename) {
        // Stat the file, and if it doesn't exist, open it in w mode
        struct stat buf;
        if (stat(filename, &buf) == -1) {
            outfile = fopen(filename, "w" FOPEN_EXTRA_FLAGS);
        }
        else {
            // Open in r+ so that we don't truncate the file, just in case
            // there is an error and we write no bytes, we leave the file
            // unmodified
            outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
        }

        if (!outfile) {
            fprintf(stderr, "\nERROR: Failed to open output file %s: ",
                    filename);
            perror(0);
            exit(-1);
        }
    }
    else if (showResponsePropertiesG) {
        // -s prints response properties to stdout, so the ACL table must
        // have its own destination file.
        fprintf(stderr, "\nERROR: getacl -s requires a filename parameter\n");
        usageExit(stderr);
    }
    else {
        outfile = stdout;
    }

    // Filled in by S3_get_acl on success.
    int aclGrantCount;
    S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
    char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE];
    char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];

    S3_init();

    S3BucketContext bucketContext =
    {
        0,
        bucketName,
        protocolG,
        uriStyleG,
        accessKeyIdG,
        secretAccessKeyG,
        0,
        awsRegionG
    };

    S3ResponseHandler responseHandler =
    {
        &responsePropertiesCallback,
        &responseCompleteCallback
    };

    // Retry transient failures according to the global retry policy.
    do {
        S3_get_acl(&bucketContext, key, ownerId, ownerDisplayName,
                   &aclGrantCount, aclGrants, 0,
                   timeoutMsG, &responseHandler, 0);
    } while (S3_status_is_retryable(statusG) && should_retry());

    if (statusG == S3StatusOK) {
        fprintf(outfile, "OwnerID %s %s\n", ownerId, ownerDisplayName);
        fprintf(outfile, "%-6s  %-90s  %-12s\n", " Type",
                "                                   User Identifier",
                " Permission");
        fprintf(outfile, "------  "
                "------------------------------------------------------------"
                "------------------------------  ------------\n");
        int i;
        for (i = 0; i < aclGrantCount; i++) {
            S3AclGrant *grant = &(aclGrants[i]);
            const char *type;
            char composedId[S3_MAX_GRANTEE_USER_ID_SIZE +
                            S3_MAX_GRANTEE_DISPLAY_NAME_SIZE + 16];
            const char *id;

            // Render the grantee as a (type, identifier) pair.
            switch (grant->granteeType) {
            case S3GranteeTypeAmazonCustomerByEmail:
                type = "Email";
                id = grant->grantee.amazonCustomerByEmail.emailAddress;
                break;
            case S3GranteeTypeCanonicalUser:
                type = "UserID";
                snprintf(composedId, sizeof(composedId),
                         "%s (%s)", grant->grantee.canonicalUser.id,
                         grant->grantee.canonicalUser.displayName);
                id = composedId;
                break;
            case S3GranteeTypeAllAwsUsers:
                type = "Group";
                id = "Authenticated AWS Users";
                break;
            case S3GranteeTypeAllUsers:
                type = "Group";
                id = "All Users";
                break;
            default:
                // Remaining grantee type: the log-delivery group.
                type = "Group";
                id = "Log Delivery";
                break;
            }
            // Translate the permission enum to its S3 wire name.
            const char *perm;
            switch (grant->permission) {
            case S3PermissionRead:
                perm = "READ";
                break;
            case S3PermissionWrite:
                perm = "WRITE";
                break;
            case S3PermissionReadACP:
                perm = "READ_ACP";
                break;
            case S3PermissionWriteACP:
                perm = "WRITE_ACP";
                break;
            default:
                perm = "FULL_CONTROL";
                break;
            }
            fprintf(outfile, "%-6s  %-90s  %-12s\n", type, id, perm);
        }
    }
    else {
        printError();
    }

    fclose(outfile);

    S3_deinitialize();
}
+
+
+// set acl -------------------------------------------------------------------
+
+void set_acl(int argc, char **argv, int optindex)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: bucket[/key]\n");
+        usageExit(stderr);
+    }
+
+    const char *bucketName = argv[optindex];
+    const char *key = 0;
+
+    // Split bucket/key
+    char *slash = argv[optindex++];
+    while (*slash && (*slash != '/')) {
+        slash++;
+    }
+    if (*slash) {
+        *slash++ = 0;
+        key = slash;
+    }
+    else {
+        key = 0;
+    }
+
+    const char *filename = 0;
+
+    while (optindex < argc) {
+        char *param = argv[optindex++];
+        if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+            filename = &(param[FILENAME_PREFIX_LEN]);
+        }
+        else {
+            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+            usageExit(stderr);
+        }
+    }
+
+    FILE *infile;
+
+    if (filename) {
+        if (!(infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) {
+            fprintf(stderr, "\nERROR: Failed to open input file %s: ",
+                    filename);
+            perror(0);
+            exit(-1);
+        }
+    }
+    else {
+        infile = stdin;
+    }
+
+    // Read in the complete ACL
+    char aclBuf[65536];
+    aclBuf[fread(aclBuf, 1, sizeof(aclBuf) - 1, infile)] = 0;
+    char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE];
+    char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
+
+    // Parse it
+    int aclGrantCount;
+    S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
+    if (!convert_simple_acl(aclBuf, ownerId, ownerDisplayName,
+                            &aclGrantCount, aclGrants)) {
+        fprintf(stderr, "\nERROR: Failed to parse ACLs\n");
+        fclose(infile);
+        exit(-1);
+    }
+
+    S3_init();
+
+    S3BucketContext bucketContext =
+    {
+        0,
+        bucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+
+    S3ResponseHandler responseHandler =
+    {
+        &responsePropertiesCallback,
+        &responseCompleteCallback
+    };
+
+    do {
+        S3_set_acl(&bucketContext, key, ownerId, ownerDisplayName,
+                   aclGrantCount, aclGrants, 0,
+                   timeoutMsG, &responseHandler, 0);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    if (statusG != S3StatusOK) {
+        printError();
+    }
+
+    fclose(infile);
+
+    S3_deinitialize();
+}
+
+// get lifecycle -------------------------------------------------------------------
+
+void get_lifecycle(int argc, char **argv, int optindex)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
+        usageExit(stderr);
+    }
+
+    const char *bucketName = argv[optindex++];
+
+    const char *filename = 0;
+
+    while (optindex < argc) {
+        char *param = argv[optindex++];
+        if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+            filename = &(param[FILENAME_PREFIX_LEN]);
+        }
+        else {
+            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+            usageExit(stderr);
+        }
+    }
+
+    FILE *outfile = 0;
+
+    if (filename) {
+        // Stat the file, and if it doesn't exist, open it in w mode
+        struct stat buf;
+        if (stat(filename, &buf) == -1) {
+            outfile = fopen(filename, "w" FOPEN_EXTRA_FLAGS);
+        }
+        else {
+            // Open in r+ so that we don't truncate the file, just in case
+            // there is an error and we write no bytes, we leave the file
+            // unmodified
+            outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
+        }
+
+        if (!outfile) {
+            fprintf(stderr, "\nERROR: Failed to open output file %s: ",
+                    filename);
+            perror(0);
+            exit(-1);
+        }
+    }
+    else if (showResponsePropertiesG) {
+        fprintf(stderr, "\nERROR: getlifecycle -s requires a filename parameter\n");
+        usageExit(stderr);
+    }
+    else {
+        outfile = stdout;
+    }
+
+    char lifecycleBuffer[64 * 1024];
+
+    S3_init();
+
+    S3BucketContext bucketContext =
+    {
+        0,
+        bucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+
+    S3ResponseHandler responseHandler =
+    {
+        &responsePropertiesCallback,
+        &responseCompleteCallback
+    };
+
+    do {
+        S3_get_lifecycle(&bucketContext,
+                         lifecycleBuffer, sizeof(lifecycleBuffer),
+                         0, timeoutMsG, &responseHandler, 0);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    if (statusG == S3StatusOK) {
+        fprintf(outfile, "%s", lifecycleBuffer);
+    }
+    else {
+        printError();
+    }
+
+    fclose(outfile);
+
+    S3_deinitialize();
+}
+
+
+// set lifecycle -------------------------------------------------------------------
+
+void set_lifecycle(int argc, char **argv, int optindex)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
+        usageExit(stderr);
+    }
+
+    const char *bucketName = argv[optindex++];
+
+    const char *filename = 0;
+
+    while (optindex < argc) {
+        char *param = argv[optindex++];
+        if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+            filename = &(param[FILENAME_PREFIX_LEN]);
+        }
+        else {
+            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+            usageExit(stderr);
+        }
+    }
+
+    FILE *infile;
+
+    if (filename) {
+        if (!(infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) {
+            fprintf(stderr, "\nERROR: Failed to open input file %s: ",
+                    filename);
+            perror(0);
+            exit(-1);
+        }
+    }
+    else {
+        infile = stdin;
+    }
+
+    // Read in the complete ACL
+    char lifecycleBuf[65536];
+    lifecycleBuf[fread(lifecycleBuf, 1, sizeof(lifecycleBuf) - 1, infile)] = 0;
+
+    S3_init();
+
+    S3BucketContext bucketContext =
+    {
+        0,
+        bucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+
+    S3ResponseHandler responseHandler =
+    {
+        &responsePropertiesCallback,
+        &responseCompleteCallback
+    };
+
+    do {
+        S3_set_lifecycle(&bucketContext,
+                         lifecycleBuf,
+                         0, timeoutMsG, &responseHandler, 0);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    if (statusG != S3StatusOK) {
+        printError();
+    }
+
+    fclose(infile);
+
+    S3_deinitialize();
+}
+
+
+// get logging ----------------------------------------------------------------
+
+void get_logging(int argc, char **argv, int optindex)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
+        usageExit(stderr);
+    }
+
+    const char *bucketName = argv[optindex++];
+    const char *filename = 0;
+
+    while (optindex < argc) {
+        char *param = argv[optindex++];
+        if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+            filename = &(param[FILENAME_PREFIX_LEN]);
+        }
+        else {
+            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+            usageExit(stderr);
+        }
+    }
+
+    FILE *outfile = 0;
+
+    if (filename) {
+        // Stat the file, and if it doesn't exist, open it in w mode
+        struct stat buf;
+        if (stat(filename, &buf) == -1) {
+            outfile = fopen(filename, "w" FOPEN_EXTRA_FLAGS);
+        }
+        else {
+            // Open in r+ so that we don't truncate the file, just in case
+            // there is an error and we write no bytes, we leave the file
+            // unmodified
+            outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
+        }
+
+        if (!outfile) {
+            fprintf(stderr, "\nERROR: Failed to open output file %s: ",
+                    filename);
+            perror(0);
+            exit(-1);
+        }
+    }
+    else if (showResponsePropertiesG) {
+        fprintf(stderr, "\nERROR: getlogging -s requires a filename "
+                "parameter\n");
+        usageExit(stderr);
+    }
+    else {
+        outfile = stdout;
+    }
+
+    int aclGrantCount;
+    S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
+    char targetBucket[S3_MAX_BUCKET_NAME_SIZE];
+    char targetPrefix[S3_MAX_KEY_SIZE];
+
+    S3_init();
+
+    S3BucketContext bucketContext =
+    {
+        0,
+        bucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+
+    S3ResponseHandler responseHandler =
+    {
+        &responsePropertiesCallback,
+        &responseCompleteCallback
+    };
+
+    do {
+        S3_get_server_access_logging(&bucketContext, targetBucket, targetPrefix,
+                                     &aclGrantCount, aclGrants, 0,
+                                     timeoutMsG,
+                                     &responseHandler, 0);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    if (statusG == S3StatusOK) {
+        if (targetBucket[0]) {
+            printf("Target Bucket: %s\n", targetBucket);
+            if (targetPrefix[0]) {
+                printf("Target Prefix: %s\n", targetPrefix);
+            }
+            fprintf(outfile, "%-6s  %-90s  %-12s\n", " Type",
+                    "                                   User Identifier",
+                    " Permission");
+            fprintf(outfile, "------  "
+                    "---------------------------------------------------------"
+                    "---------------------------------  ------------\n");
+            int i;
+            for (i = 0; i < aclGrantCount; i++) {
+                S3AclGrant *grant = &(aclGrants[i]);
+                const char *type;
+                char composedId[S3_MAX_GRANTEE_USER_ID_SIZE +
+                                S3_MAX_GRANTEE_DISPLAY_NAME_SIZE + 16];
+                const char *id;
+
+                switch (grant->granteeType) {
+                case S3GranteeTypeAmazonCustomerByEmail:
+                    type = "Email";
+                    id = grant->grantee.amazonCustomerByEmail.emailAddress;
+                    break;
+                case S3GranteeTypeCanonicalUser:
+                    type = "UserID";
+                    snprintf(composedId, sizeof(composedId),
+                             "%s (%s)", grant->grantee.canonicalUser.id,
+                             grant->grantee.canonicalUser.displayName);
+                    id = composedId;
+                    break;
+                case S3GranteeTypeAllAwsUsers:
+                    type = "Group";
+                    id = "Authenticated AWS Users";
+                    break;
+                default:
+                    type = "Group";
+                    id = "All Users";
+                    break;
+                }
+                const char *perm;
+                switch (grant->permission) {
+                case S3PermissionRead:
+                    perm = "READ";
+                    break;
+                case S3PermissionWrite:
+                    perm = "WRITE";
+                    break;
+                case S3PermissionReadACP:
+                    perm = "READ_ACP";
+                    break;
+                case S3PermissionWriteACP:
+                    perm = "WRITE_ACP";
+                    break;
+                default:
+                    perm = "FULL_CONTROL";
+                    break;
+                }
+                fprintf(outfile, "%-6s  %-90s  %-12s\n", type, id, perm);
+            }
+        }
+        else {
+            printf("Service logging is not enabled for this bucket.\n");
+        }
+    }
+    else {
+        printError();
+    }
+
+    fclose(outfile);
+
+    S3_deinitialize();
+}
+
+
+// set logging ----------------------------------------------------------------
+
+void set_logging(int argc, char **argv, int optindex)
+{
+    if (optindex == argc) {
+        fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
+        usageExit(stderr);
+    }
+
+    const char *bucketName = argv[optindex++];
+
+    const char *targetBucket = 0, *targetPrefix = 0, *filename = 0;
+
+    while (optindex < argc) {
+        char *param = argv[optindex++];
+        if (!strncmp(param, TARGET_BUCKET_PREFIX, TARGET_BUCKET_PREFIX_LEN)) {
+            targetBucket = &(param[TARGET_BUCKET_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, TARGET_PREFIX_PREFIX,
+                          TARGET_PREFIX_PREFIX_LEN)) {
+            targetPrefix = &(param[TARGET_PREFIX_PREFIX_LEN]);
+        }
+        else if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+            filename = &(param[FILENAME_PREFIX_LEN]);
+        }
+        else {
+            fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+            usageExit(stderr);
+        }
+    }
+
+    int aclGrantCount = 0;
+    S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
+
+    if (targetBucket) {
+        FILE *infile;
+
+        if (filename) {
+            if (!(infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) {
+                fprintf(stderr, "\nERROR: Failed to open input file %s: ",
+                        filename);
+                perror(0);
+                exit(-1);
+            }
+        }
+        else {
+            infile = stdin;
+        }
+
+        // Read in the complete ACL
+        char aclBuf[65536];
+        aclBuf[fread(aclBuf, 1, sizeof(aclBuf), infile)] = 0;
+        char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE];
+        char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
+
+        // Parse it
+        if (!convert_simple_acl(aclBuf, ownerId, ownerDisplayName,
+                                &aclGrantCount, aclGrants)) {
+            fprintf(stderr, "\nERROR: Failed to parse ACLs\n");
+            fclose(infile);
+            exit(-1);
+        }
+
+        fclose(infile);
+    }
+
+    S3_init();
+
+    S3BucketContext bucketContext =
+    {
+        0,
+        bucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0,
+        awsRegionG
+    };
+
+    S3ResponseHandler responseHandler =
+    {
+        &responsePropertiesCallback,
+        &responseCompleteCallback
+    };
+
+    do {
+        S3_set_server_access_logging(&bucketContext, targetBucket,
+                                     targetPrefix, aclGrantCount, aclGrants,
+                                     0,
+                                     timeoutMsG, &responseHandler, 0);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+
+    if (statusG != S3StatusOK) {
+        printError();
+    }
+
+    S3_deinitialize();
+}
+
+
+// main ----------------------------------------------------------------------
+
// Program entry point: parses the global command-line options, validates
// the credential environment variables, then dispatches to the handler
// for the first non-option argument (the command name).
int main(int argc, char **argv)
{
    // Parse args
    while (1) {
        int idx = 0;
        int c = getopt_long(argc, argv, "vfhusr:t:g:", longOptionsG, &idx);

        if (c == -1) {
            // End of options
            break;
        }

        switch (c) {
        case 'f':
            forceG = 1;
            break;
        case 'h':
            // Use virtual-host-style bucket URIs instead of path-style.
            uriStyleG = S3UriStyleVirtualHost;
            break;
        case 'u':
            // Use unencrypted HTTP instead of HTTPS.
            protocolG = S3ProtocolHTTP;
            break;
        case 's':
            showResponsePropertiesG = 1;
            break;
        case 'r': {
            // Hand-rolled decimal parse of the retry count.
            // NOTE(review): assumes optarg contains only digits; any other
            // character silently produces a garbage value - TODO confirm
            // whether validation is wanted here.
            const char *v = optarg;
            retriesG = 0;
            while (*v) {
                retriesG *= 10;
                retriesG += *v - '0';
                v++;
            }
            }
            break;
        case 't': {
            // Hand-rolled decimal parse of the request timeout (ms); same
            // no-validation caveat as -r above.
            const char *v = optarg;
            timeoutMsG = 0;
            while (*v) {
                timeoutMsG *= 10;
                timeoutMsG += *v - '0';
                v++;
            }
            }
            break;
        case 'v':
            verifyPeerG = S3_INIT_VERIFY_PEER;
            break;
        case 'g':
            // Region used for AWS signing; strdup'd copy lives for the
            // process lifetime.
            awsRegionG = strdup(optarg);
            break;
        default:
            fprintf(stderr, "\nERROR: Unknown option: -%c\n", c);
            // Usage exit
            usageExit(stderr);
        }
    }

    // The first non-option argument gives the operation to perform
    if (optind == argc) {
        fprintf(stderr, "\n\nERROR: Missing argument: command\n\n");
        usageExit(stderr);
    }

    const char *command = argv[optind++];

    // "help" is handled before the credential check so it works without
    // any environment setup.
    if (!strcmp(command, "help")) {
        fprintf(stdout, "\ns3 is a program for performing single requests "
                "to Amazon S3.\n");
        usageExit(stdout);
    }

    // Credentials come only from the environment, never from argv.
    accessKeyIdG = getenv("S3_ACCESS_KEY_ID");
    if (!accessKeyIdG) {
        fprintf(stderr, "Missing environment variable: S3_ACCESS_KEY_ID\n");
        return -1;
    }
    secretAccessKeyG = getenv("S3_SECRET_ACCESS_KEY");
    if (!secretAccessKeyG) {
        fprintf(stderr,
                "Missing environment variable: S3_SECRET_ACCESS_KEY\n");
        return -1;
    }

    // Dispatch on the command name; each handler consumes the remaining
    // arguments starting at optind.
    if (!strcmp(command, "list")) {
        list(argc, argv, optind);
    }
    else if (!strcmp(command, "test")) {
        test_bucket(argc, argv, optind);
    }
    else if (!strcmp(command, "create")) {
        create_bucket(argc, argv, optind);
    }
    else if (!strcmp(command, "delete")) {
        if (optind == argc) {
            fprintf(stderr,
                    "\nERROR: Missing parameter: bucket or bucket/key\n");
            usageExit(stderr);
        }
        // "delete" is overloaded: a '/' in the argument means an object
        // key is present, so delete an object; otherwise delete a bucket.
        char *val = argv[optind];
        int hasSlash = 0;
        while (*val) {
            if (*val++ == '/') {
                hasSlash = 1;
                break;
            }
        }
        if (hasSlash) {
            delete_object(argc, argv, optind);
        }
        else {
            delete_bucket(argc, argv, optind);
        }
    }
    else if (!strcmp(command, "put")) {
        put_object(argc, argv, optind, NULL, NULL, 0);
    }
    else if (!strcmp(command, "copy")) {
        copy_object(argc, argv, optind);
    }
    else if (!strcmp(command, "get")) {
        get_object(argc, argv, optind);
    }
    else if (!strcmp(command, "head")) {
        head_object(argc, argv, optind);
    }
    else if (!strcmp(command, "gqs")) {
        generate_query_string(argc, argv, optind);
    }
    else if (!strcmp(command, "getacl")) {
        get_acl(argc, argv, optind);
    }
    else if (!strcmp(command, "setacl")) {
        set_acl(argc, argv, optind);
    }
    else if (!strcmp(command, "getlifecycle")) {
        get_lifecycle(argc, argv, optind);
    }
    else if (!strcmp(command, "setlifecycle")) {
        set_lifecycle(argc, argv, optind);
    }
    else if (!strcmp(command, "getlogging")) {
        get_logging(argc, argv, optind);
    }
    else if (!strcmp(command, "setlogging")) {
        set_logging(argc, argv, optind);
    }
    else if (!strcmp(command, "listmultiparts")) {
        list_multipart_uploads(argc, argv, optind);
    }
    else if (!strcmp(command, "abortmp")) {
        abort_multipart_upload(argc, argv, optind);
    }
    else if (!strcmp(command, "listparts")) {
        list_parts(argc, argv, optind);
    }
    else {
        fprintf(stderr, "Unknown command: %s\n", command);
        return -1;
    }

    return 0;
}

+ 196 - 0
libs/libs3/src/service.c

@@ -0,0 +1,196 @@
+/** **************************************************************************
+ * service.c
+ *
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ *
+ * This file is part of libs3.
+ *
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include "request.h"
+
+
// Per-request state for S3_list_service: the incremental XML parser plus
// the caller's callbacks, and string buffers that accumulate the fields
// of the bucket entry currently being parsed.
typedef struct XmlCallbackData
{
    SimpleXml simpleXml;    // incremental parser that feeds xmlCallback

    // Caller-supplied callbacks and their opaque data, forwarded by the
    // local wrapper callbacks below.
    S3ResponsePropertiesCallback *responsePropertiesCallback;
    S3ListServiceCallback *listServiceCallback;
    S3ResponseCompleteCallback *responseCompleteCallback;
    void *callbackData;

    string_buffer(ownerId, 256);
    string_buffer(ownerDisplayName, 256);
    string_buffer(bucketName, 256);      // reset after each bucket callback
    string_buffer(creationDate, 128);    // ISO-8601 text, parsed on element close
} XmlCallbackData;
+
+
// SimpleXml element callback for the ListAllMyBuckets response.  Invoked
// with data != NULL for character data inside an element, and with
// data == NULL when an element closes; each closing Bucket element
// triggers one listServiceCallback invocation.
static S3Status xmlCallback(const char *elementPath, const char *data,
                            int dataLen, void *callbackData)
{
    XmlCallbackData *cbData = (XmlCallbackData *) callbackData;

    int fit;

    if (data) {
        // Character data: accumulate into the buffer matching the element.
        if (!strcmp(elementPath, "ListAllMyBucketsResult/Owner/ID")) {
            string_buffer_append(cbData->ownerId, data, dataLen, fit);
        }
        else if (!strcmp(elementPath,
                         "ListAllMyBucketsResult/Owner/DisplayName")) {
            string_buffer_append(cbData->ownerDisplayName, data, dataLen, fit);
        }
        else if (!strcmp(elementPath,
                         "ListAllMyBucketsResult/Buckets/Bucket/Name")) {
            string_buffer_append(cbData->bucketName, data, dataLen, fit);
        }
        else if (!strcmp
                 (elementPath,
                  "ListAllMyBucketsResult/Buckets/Bucket/CreationDate")) {
            string_buffer_append(cbData->creationDate, data, dataLen, fit);
        }
    }
    else {
        if (!strcmp(elementPath, "ListAllMyBucketsResult/Buckets/Bucket")) {
            // Parse date.  Assume ISO-8601 date format.
            time_t creationDate = parseIso8601Time(cbData->creationDate);

            // Make the callback - a bucket just finished
            S3Status status = (*(cbData->listServiceCallback))
                (cbData->ownerId, cbData->ownerDisplayName,
                 cbData->bucketName, creationDate, cbData->callbackData);

            // Clear the per-bucket buffers for the next Bucket element;
            // the owner fields persist across all buckets in the response.
            string_buffer_initialize(cbData->bucketName);
            string_buffer_initialize(cbData->creationDate);

            return status;
        }
    }

    /* Avoid compiler error about variable set but not used */
    (void) fit;

    return S3StatusOK;
}
+
+
+static S3Status propertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    XmlCallbackData *cbData = (XmlCallbackData *) callbackData;
+
+    return (*(cbData->responsePropertiesCallback))
+        (responseProperties, cbData->callbackData);
+}
+
+
+static S3Status dataCallback(int bufferSize, const char *buffer,
+                             void *callbackData)
+{
+    XmlCallbackData *cbData = (XmlCallbackData *) callbackData;
+
+    return simplexml_add(&(cbData->simpleXml), buffer, bufferSize);
+}
+
+
+static void completeCallback(S3Status requestStatus,
+                             const S3ErrorDetails *s3ErrorDetails,
+                             void *callbackData)
+{
+    XmlCallbackData *cbData = (XmlCallbackData *) callbackData;
+
+    (*(cbData->responseCompleteCallback))
+        (requestStatus, s3ErrorDetails, cbData->callbackData);
+
+    simplexml_deinitialize(&(cbData->simpleXml));
+
+    free(cbData);
+}
+
+
// Issues a GET on the service endpoint ("list all my buckets") and
// streams the XML response through xmlCallback, invoking the handler's
// listServiceCallback once per bucket.  The request runs immediately, or
// is queued on requestContext when one is supplied.  All outcomes -
// including setup failure - are reported via the handler's completion
// callback.
void S3_list_service(S3Protocol protocol, const char *accessKeyId,
                     const char *secretAccessKey, const char *securityToken,
                     const char *hostName, const char *authRegion,
                     S3RequestContext *requestContext,
                     int timeoutMs,
                     const S3ListServiceHandler *handler, void *callbackData)
{
    // Create and set up the callback data
    XmlCallbackData *data =
        (XmlCallbackData *) malloc(sizeof(XmlCallbackData));
    if (!data) {
        // Report allocation failure through the completion callback rather
        // than a return value, matching the rest of the libs3 API.
        (*(handler->responseHandler.completeCallback))
            (S3StatusOutOfMemory, 0, callbackData);
        return;
    }

    simplexml_initialize(&(data->simpleXml), &xmlCallback, data);

    data->responsePropertiesCallback =
        handler->responseHandler.propertiesCallback;
    data->listServiceCallback = handler->listServiceCallback;
    data->responseCompleteCallback = handler->responseHandler.completeCallback;
    data->callbackData = callbackData;

    string_buffer_initialize(data->ownerId);
    string_buffer_initialize(data->ownerDisplayName);
    string_buffer_initialize(data->bucketName);
    string_buffer_initialize(data->creationDate);

    // Set up the RequestParams
    // (positional initializer: the order must match the RequestParams
    // declaration in request.h exactly - see the trailing field comments)
    RequestParams params =
    {
        HttpRequestTypeGET,                           // httpRequestType
        { hostName,                                   // hostName
          0,                                          // bucketName
          protocol,                                   // protocol
          S3UriStylePath,                             // uriStyle
          accessKeyId,                                // accessKeyId
          secretAccessKey,                            // secretAccessKey
          securityToken,                              // securityToken
          authRegion },                               // authRegion
        0,                                            // key
        0,                                            // queryParams
        0,                                            // subResource
        0,                                            // copySourceBucketName
        0,                                            // copySourceKey
        0,                                            // getConditions
        0,                                            // startByte
        0,                                            // byteCount
        0,                                            // requestProperties
        &propertiesCallback,                          // propertiesCallback
        0,                                            // toS3Callback
        0,                                            // toS3CallbackTotalSize
        &dataCallback,                                // fromS3Callback
        &completeCallback,                            // completeCallback
        data,                                         // callbackData
        timeoutMs                                     // timeoutMs
    };

    // Perform the request
    request_perform(&params, requestContext);
}
+
+

+ 563 - 0
libs/libs3/src/service_access_logging.c

@@ -0,0 +1,563 @@
+/** **************************************************************************
+ * service_access_logging.c
+ *
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ *
+ * This file is part of libs3.
+ *
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <stdlib.h>
+#include <string.h>
+#include "libs3.h"
+#include "request.h"
+
+
+// get server access logging---------------------------------------------------
+
+// Parser state used while converting a BucketLoggingStatus XML document
+// into the caller's targetBucket/targetPrefix strings and S3AclGrant array.
+typedef struct ConvertBlsData
+{
+    char *targetBucketReturn;      // caller-supplied target bucket buffer
+    int targetBucketReturnLen;     // bytes written into targetBucketReturn
+    char *targetPrefixReturn;      // caller-supplied target prefix buffer
+    int targetPrefixReturnLen;     // bytes written into targetPrefixReturn
+    int *aclGrantCountReturn;      // number of grants completed so far
+    S3AclGrant *aclGrants;         // caller-supplied grant array being filled
+
+    // Scratch buffers holding the fields of the grant currently being
+    // parsed; flushed into aclGrants when the </Grant> element closes.
+    string_buffer(emailAddress, S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE);
+    string_buffer(userId, S3_MAX_GRANTEE_USER_ID_SIZE);
+    string_buffer(userDisplayName, S3_MAX_GRANTEE_DISPLAY_NAME_SIZE);
+    string_buffer(groupUri, 128);
+    string_buffer(permission, 32);
+} ConvertBlsData;
+
+
+// SimpleXml callback that interprets a BucketLoggingStatus document.
+// Non-null data is character content for the element at elementPath;
+// null data means the element just closed (used to finalize a Grant).
+// Returns S3StatusOK, or an error status if a value overflows its buffer
+// or a grantee/permission value is unrecognized.
+static S3Status convertBlsXmlCallback(const char *elementPath,
+                                      const char *data, int dataLen,
+                                      void *callbackData)
+{
+    ConvertBlsData *caData = (ConvertBlsData *) callbackData;
+
+    int fit;
+
+    if (data) {
+        if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+                    "TargetBucket")) {
+            // NOTE(review): 255 is assumed to match the caller's buffer
+            // size (see S3_get_server_access_logging) — confirm against
+            // S3_MAX_BUCKET_NAME_SIZE.
+            caData->targetBucketReturnLen +=
+                snprintf(&(caData->targetBucketReturn
+                           [caData->targetBucketReturnLen]),
+                         255 - caData->targetBucketReturnLen - 1,
+                         "%.*s", dataLen, data);
+            if (caData->targetBucketReturnLen >= 255) {
+                return S3StatusTargetBucketTooLong;
+            }
+        }
+        else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+                    "TargetPrefix")) {
+            caData->targetPrefixReturnLen +=
+                snprintf(&(caData->targetPrefixReturn
+                           [caData->targetPrefixReturnLen]),
+                         255 - caData->targetPrefixReturnLen - 1,
+                         "%.*s", dataLen, data);
+            if (caData->targetPrefixReturnLen >= 255) {
+                return S3StatusTargetPrefixTooLong;
+            }
+        }
+        else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+                         "TargetGrants/Grant/Grantee/EmailAddress")) {
+            // AmazonCustomerByEmail
+            string_buffer_append(caData->emailAddress, data, dataLen, fit);
+            if (!fit) {
+                return S3StatusEmailAddressTooLong;
+            }
+        }
+        // BUG FIX: this path previously read "AccessControlPolicy/
+        // AccessControlList/Grant/Grantee/ID" (copy-pasted from the ACL
+        // converter), which can never occur inside a BucketLoggingStatus
+        // document, so CanonicalUser grantee IDs were never captured.
+        else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+                         "TargetGrants/Grant/Grantee/ID")) {
+            // CanonicalUser
+            string_buffer_append(caData->userId, data, dataLen, fit);
+            if (!fit) {
+                return S3StatusUserIdTooLong;
+            }
+        }
+        else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+                         "TargetGrants/Grant/Grantee/DisplayName")) {
+            // CanonicalUser
+            string_buffer_append(caData->userDisplayName, data, dataLen, fit);
+            if (!fit) {
+                return S3StatusUserDisplayNameTooLong;
+            }
+        }
+        else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+                         "TargetGrants/Grant/Grantee/URI")) {
+            // Group
+            string_buffer_append(caData->groupUri, data, dataLen, fit);
+            if (!fit) {
+                return S3StatusGroupUriTooLong;
+            }
+        }
+        else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+                         "TargetGrants/Grant/Permission")) {
+            // Permission
+            string_buffer_append(caData->permission, data, dataLen, fit);
+            if (!fit) {
+                return S3StatusPermissionTooLong;
+            }
+        }
+    }
+    else {
+        if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+                    "TargetGrants/Grant")) {
+            // A grant has just been completed; so add the next S3AclGrant
+            // based on the values read
+            if (*(caData->aclGrantCountReturn) == S3_MAX_ACL_GRANT_COUNT) {
+                return S3StatusTooManyGrants;
+            }
+
+            S3AclGrant *grant = &(caData->aclGrants
+                                  [*(caData->aclGrantCountReturn)]);
+
+            // Determine the grantee type from whichever scratch buffer
+            // was populated while parsing this Grant element.
+            if (caData->emailAddress[0]) {
+                grant->granteeType = S3GranteeTypeAmazonCustomerByEmail;
+                strcpy(grant->grantee.amazonCustomerByEmail.emailAddress,
+                       caData->emailAddress);
+            }
+            else if (caData->userId[0] && caData->userDisplayName[0]) {
+                grant->granteeType = S3GranteeTypeCanonicalUser;
+                strcpy(grant->grantee.canonicalUser.id, caData->userId);
+                strcpy(grant->grantee.canonicalUser.displayName,
+                       caData->userDisplayName);
+            }
+            else if (caData->groupUri[0]) {
+                if (!strcmp(caData->groupUri,
+                            ACS_GROUP_AWS_USERS)) {
+                    grant->granteeType = S3GranteeTypeAllAwsUsers;
+                }
+                else if (!strcmp(caData->groupUri,
+                                 ACS_GROUP_ALL_USERS)) {
+                    grant->granteeType = S3GranteeTypeAllUsers;
+                }
+                else {
+                    return S3StatusBadGrantee;
+                }
+            }
+            else {
+                return S3StatusBadGrantee;
+            }
+
+            if (!strcmp(caData->permission, "READ")) {
+                grant->permission = S3PermissionRead;
+            }
+            else if (!strcmp(caData->permission, "WRITE")) {
+                grant->permission = S3PermissionWrite;
+            }
+            else if (!strcmp(caData->permission, "READ_ACP")) {
+                grant->permission = S3PermissionReadACP;
+            }
+            else if (!strcmp(caData->permission, "WRITE_ACP")) {
+                grant->permission = S3PermissionWriteACP;
+            }
+            else if (!strcmp(caData->permission, "FULL_CONTROL")) {
+                grant->permission = S3PermissionFullControl;
+            }
+            else {
+                return S3StatusBadPermission;
+            }
+
+            (*(caData->aclGrantCountReturn))++;
+
+            // Reset scratch buffers for the next Grant element
+            string_buffer_initialize(caData->emailAddress);
+            string_buffer_initialize(caData->userId);
+            string_buffer_initialize(caData->userDisplayName);
+            string_buffer_initialize(caData->groupUri);
+            string_buffer_initialize(caData->permission);
+        }
+    }
+
+    return S3StatusOK;
+}
+
+
+// Parse a complete BucketLoggingStatus XML document into the caller's
+// result buffers.  Returns the status of the (single-shot) parse.
+static S3Status convert_bls(char *blsXml, char *targetBucketReturn,
+                            char *targetPrefixReturn, int *aclGrantCountReturn,
+                            S3AclGrant *aclGrants)
+{
+    ConvertBlsData data;
+
+    // Zero everything (lengths, scratch string_buffers), then wire up the
+    // caller-owned output locations and empty their initial contents.
+    memset(&data, 0, sizeof(data));
+    data.targetBucketReturn = targetBucketReturn;
+    data.targetPrefixReturn = targetPrefixReturn;
+    data.aclGrantCountReturn = aclGrantCountReturn;
+    data.aclGrants = aclGrants;
+    targetBucketReturn[0] = 0;
+    targetPrefixReturn[0] = 0;
+    *aclGrantCountReturn = 0;
+
+    // Feed the entire document through the simplexml parser in one chunk
+    SimpleXml parser;
+    simplexml_initialize(&parser, &convertBlsXmlCallback, &data);
+    S3Status status = simplexml_add(&parser, blsXml, strlen(blsXml));
+    simplexml_deinitialize(&parser);
+
+    return status;
+}
+
+
+// Use a rather arbitrary max size for the document of 64K
+#define BLS_XML_DOC_MAXSIZE (64 * 1024)
+
+
+// Request state for S3_get_server_access_logging: accumulates the XML
+// response body, then forwards results through the user's callbacks.
+typedef struct GetBlsData
+{
+    SimpleXml simpleXml;
+
+    // User's handler callbacks and opaque data, invoked on completion
+    S3ResponsePropertiesCallback *responsePropertiesCallback;
+    S3ResponseCompleteCallback *responseCompleteCallback;
+    void *callbackData;
+
+    // Caller-owned output locations, filled when the response is parsed
+    char *targetBucketReturn;
+    char *targetPrefixReturn;
+    int *aclGrantCountReturn;
+    S3AclGrant *aclGrants;
+    // Buffer collecting the raw XML response until the request completes
+    string_buffer(blsXmlDocument, BLS_XML_DOC_MAXSIZE);
+} GetBlsData;
+
+
+// Forward response properties straight through to the user's handler.
+static S3Status getBlsPropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    GetBlsData *data = (GetBlsData *) callbackData;
+    S3ResponsePropertiesCallback *cb = data->responsePropertiesCallback;
+
+    return cb(responseProperties, data->callbackData);
+}
+
+
+// Accumulate a chunk of the XML response body; fail once the fixed-size
+// document buffer would overflow.
+static S3Status getBlsDataCallback(int bufferSize, const char *buffer,
+                                   void *callbackData)
+{
+    GetBlsData *data = (GetBlsData *) callbackData;
+    int appended;
+
+    string_buffer_append(data->blsXmlDocument, buffer, bufferSize, appended);
+    if (!appended) {
+        return S3StatusXmlDocumentTooLarge;
+    }
+
+    return S3StatusOK;
+}
+
+
+// Request finished: parse the accumulated XML (only if the HTTP request
+// itself succeeded), notify the user's handler, and free the state.
+static void getBlsCompleteCallback(S3Status requestStatus,
+                                   const S3ErrorDetails *s3ErrorDetails,
+                                   void *callbackData)
+{
+    GetBlsData *data = (GetBlsData *) callbackData;
+    S3Status finalStatus = requestStatus;
+
+    if (finalStatus == S3StatusOK) {
+        finalStatus = convert_bls(data->blsXmlDocument,
+                                  data->targetBucketReturn,
+                                  data->targetPrefixReturn,
+                                  data->aclGrantCountReturn,
+                                  data->aclGrants);
+    }
+
+    (*(data->responseCompleteCallback))(finalStatus, s3ErrorDetails,
+                                        data->callbackData);
+
+    free(data);
+}
+
+
+// Issue GET ?logging on the bucket and, on success, parse the returned
+// BucketLoggingStatus document into the caller-owned targetBucketReturn,
+// targetPrefixReturn, aclGrantCountReturn and aclGrants locations, which
+// must remain valid until the request completes.  Errors (including
+// out-of-memory) are reported through handler->completeCallback.
+void S3_get_server_access_logging(const S3BucketContext *bucketContext,
+                                  char *targetBucketReturn,
+                                  char *targetPrefixReturn,
+                                  int *aclGrantCountReturn,
+                                  S3AclGrant *aclGrants,
+                                  S3RequestContext *requestContext,
+                                  int timeoutMs,
+                                  const S3ResponseHandler *handler,
+                                  void *callbackData)
+{
+    // Create the callback data
+    GetBlsData *gsData = (GetBlsData *) malloc(sizeof(GetBlsData));
+    if (!gsData) {
+        (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+        return;
+    }
+
+    gsData->responsePropertiesCallback = handler->propertiesCallback;
+    gsData->responseCompleteCallback = handler->completeCallback;
+    gsData->callbackData = callbackData;
+
+    gsData->targetBucketReturn = targetBucketReturn;
+    gsData->targetPrefixReturn = targetPrefixReturn;
+    gsData->aclGrantCountReturn = aclGrantCountReturn;
+    gsData->aclGrants = aclGrants;
+    string_buffer_initialize(gsData->blsXmlDocument);
+    // Start the grant count at zero; the parser counts up from here
+    *aclGrantCountReturn = 0;
+
+    // Set up the RequestParams.  Fields are positional; the trailing
+    // comments name each RequestParams member being initialized.
+    RequestParams params =
+    {
+        HttpRequestTypeGET,                           // httpRequestType
+        { bucketContext->hostName,                    // hostName
+          bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        0,                                            // key
+        0,                                            // queryParams
+        "logging",                                    // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        0,                                            // putProperties
+        &getBlsPropertiesCallback,                    // propertiesCallback
+        0,                                            // toS3Callback
+        0,                                            // toS3CallbackTotalSize
+        &getBlsDataCallback,                          // fromS3Callback
+        &getBlsCompleteCallback,                      // completeCallback
+        gsData,                                       // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+}
+
+
+
+// set server access logging---------------------------------------------------
+
+// Serialize server-access-logging settings into a BucketLoggingStatus XML
+// document in xmlDocument (capacity xmlDocumentBufferSize); the generated
+// length is stored in *xmlDocumentLenReturn.  An empty/null targetBucket
+// produces an empty status document, which disables logging.  Returns
+// S3StatusXmlDocumentTooLarge if the buffer is exceeded, else S3StatusOK.
+static S3Status generateSalXmlDocument(const char *targetBucket,
+                                       const char *targetPrefix,
+                                       int aclGrantCount,
+                                       const S3AclGrant *aclGrants,
+                                       int *xmlDocumentLenReturn,
+                                       char *xmlDocument,
+                                       int xmlDocumentBufferSize)
+{
+    *xmlDocumentLenReturn = 0;
+
+// Append formatted text to xmlDocument, tracking the running length and
+// bailing out of the enclosing function on overflow.
+#define append(fmt, ...)                                        \
+    do {                                                        \
+        *xmlDocumentLenReturn += snprintf                       \
+            (&(xmlDocument[*xmlDocumentLenReturn]),             \
+             xmlDocumentBufferSize - *xmlDocumentLenReturn - 1, \
+             fmt, __VA_ARGS__);                                 \
+        if (*xmlDocumentLenReturn >= xmlDocumentBufferSize) {   \
+            return S3StatusXmlDocumentTooLarge;                 \
+        } \
+    } while (0)
+
+    append("%s", "<BucketLoggingStatus "
+           "xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\">");
+
+    if (targetBucket && targetBucket[0]) {
+        append("<LoggingEnabled><TargetBucket>%s</TargetBucket>", targetBucket);
+        append("<TargetPrefix>%s</TargetPrefix>",
+               targetPrefix ? targetPrefix : "");
+
+        if (aclGrantCount) {
+            append("%s", "<TargetGrants>");
+            int i;
+            for (i = 0; i < aclGrantCount; i++) {
+                append("%s", "<Grant><Grantee "
+                       "xmlns:xsi=\"http://www.w3.org/2001/"
+                       "XMLSchema-instance\" xsi:type=\"");
+                const S3AclGrant *grant = &(aclGrants[i]);
+                // Emit the grantee element matching the grant's type
+                switch (grant->granteeType) {
+                case S3GranteeTypeAmazonCustomerByEmail:
+                    append("AmazonCustomerByEmail\"><EmailAddress>%s"
+                           "</EmailAddress>",
+                           grant->grantee.amazonCustomerByEmail.emailAddress);
+                    break;
+                case S3GranteeTypeCanonicalUser:
+                    append("CanonicalUser\"><ID>%s</ID><DisplayName>%s"
+                           "</DisplayName>",
+                           grant->grantee.canonicalUser.id,
+                           grant->grantee.canonicalUser.displayName);
+                    break;
+                default: // case S3GranteeTypeAllAwsUsers/S3GranteeTypeAllUsers:
+                    append("Group\"><URI>%s</URI>",
+                           (grant->granteeType == S3GranteeTypeAllAwsUsers) ?
+                           ACS_GROUP_AWS_USERS : ACS_GROUP_ALL_USERS);
+                    break;
+                }
+                append("</Grantee><Permission>%s</Permission></Grant>",
+                       ((grant->permission == S3PermissionRead) ? "READ" :
+                        (grant->permission == S3PermissionWrite) ? "WRITE" :
+                        (grant->permission ==
+                         S3PermissionReadACP) ? "READ_ACP" :
+                        (grant->permission ==
+                         S3PermissionWriteACP) ? "WRITE_ACP" : "FULL_CONTROL"));
+            }
+            append("%s", "</TargetGrants>");
+        }
+        append("%s", "</LoggingEnabled>");
+    }
+
+    append("%s", "</BucketLoggingStatus>");
+
+    return S3StatusOK;
+}
+
+
+// Request state for S3_set_server_access_logging: holds the generated XML
+// document and streams it out through the toS3Callback.
+typedef struct SetSalData
+{
+    // User's handler callbacks and opaque data
+    S3ResponsePropertiesCallback *responsePropertiesCallback;
+    S3ResponseCompleteCallback *responseCompleteCallback;
+    void *callbackData;
+
+    int salXmlDocumentLen;             // total length of the XML document
+    char salXmlDocument[BLS_XML_DOC_MAXSIZE];
+    int salXmlDocumentBytesWritten;    // progress of the upload so far
+
+} SetSalData;
+
+
+// Pass response properties straight through to the user's handler.
+static S3Status setSalPropertiesCallback
+    (const S3ResponseProperties *responseProperties, void *callbackData)
+{
+    SetSalData *data = (SetSalData *) callbackData;
+    S3ResponsePropertiesCallback *cb = data->responsePropertiesCallback;
+
+    return cb(responseProperties, data->callbackData);
+}
+
+
+// Supply the next chunk of the XML document to send.  Returns the number
+// of bytes copied into buffer; 0 signals end of data.
+static int setSalDataCallback(int bufferSize, char *buffer, void *callbackData)
+{
+    SetSalData *data = (SetSalData *) callbackData;
+
+    // Bytes of the document still waiting to be transmitted
+    int remaining = data->salXmlDocumentLen - data->salXmlDocumentBytesWritten;
+    int chunk = (remaining < bufferSize) ? remaining : bufferSize;
+
+    if (chunk <= 0) {
+        return 0;
+    }
+
+    memcpy(buffer,
+           data->salXmlDocument + data->salXmlDocumentBytesWritten, chunk);
+    data->salXmlDocumentBytesWritten += chunk;
+
+    return chunk;
+}
+
+
+// Request finished: notify the user's handler, then release the request
+// state allocated in S3_set_server_access_logging.
+static void setSalCompleteCallback(S3Status requestStatus,
+                                   const S3ErrorDetails *s3ErrorDetails,
+                                   void *callbackData)
+{
+    SetSalData *data = (SetSalData *) callbackData;
+
+    (*(data->responseCompleteCallback))(requestStatus, s3ErrorDetails,
+                                        data->callbackData);
+
+    free(data);
+}
+
+
+// Issue PUT ?logging on the bucket with a BucketLoggingStatus document
+// built from targetBucket/targetPrefix and the aclGrants array.  Passing
+// an empty/null targetBucket disables logging.  Errors (too many grants,
+// out-of-memory, oversized document) are reported through
+// handler->completeCallback before any network activity occurs.
+void S3_set_server_access_logging(const S3BucketContext *bucketContext,
+                                  const char *targetBucket,
+                                  const char *targetPrefix, int aclGrantCount,
+                                  const S3AclGrant *aclGrants,
+                                  S3RequestContext *requestContext,
+                                  int timeoutMs,
+                                  const S3ResponseHandler *handler,
+                                  void *callbackData)
+{
+    // Reject grant lists larger than the fixed S3AclGrant capacity
+    if (aclGrantCount > S3_MAX_ACL_GRANT_COUNT) {
+        (*(handler->completeCallback))
+            (S3StatusTooManyGrants, 0, callbackData);
+        return;
+    }
+
+    SetSalData *data = (SetSalData *) malloc(sizeof(SetSalData));
+    if (!data) {
+        (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+        return;
+    }
+
+    // Convert aclGrants to XML document
+    S3Status status = generateSalXmlDocument
+        (targetBucket, targetPrefix, aclGrantCount, aclGrants,
+         &(data->salXmlDocumentLen), data->salXmlDocument,
+         sizeof(data->salXmlDocument));
+    if (status != S3StatusOK) {
+        free(data);
+        (*(handler->completeCallback))(status, 0, callbackData);
+        return;
+    }
+
+    data->responsePropertiesCallback = handler->propertiesCallback;
+    data->responseCompleteCallback = handler->completeCallback;
+    data->callbackData = callbackData;
+
+    data->salXmlDocumentBytesWritten = 0;
+
+    // Set up the RequestParams.  Fields are positional; the trailing
+    // comments name each RequestParams member being initialized.
+    RequestParams params =
+    {
+        HttpRequestTypePUT,                           // httpRequestType
+        { bucketContext->hostName,                    // hostName
+          bucketContext->bucketName,                  // bucketName
+          bucketContext->protocol,                    // protocol
+          bucketContext->uriStyle,                    // uriStyle
+          bucketContext->accessKeyId,                 // accessKeyId
+          bucketContext->secretAccessKey,             // secretAccessKey
+          bucketContext->securityToken,               // securityToken
+          bucketContext->authRegion },                // authRegion
+        0,                                            // key
+        0,                                            // queryParams
+        "logging",                                    // subResource
+        0,                                            // copySourceBucketName
+        0,                                            // copySourceKey
+        0,                                            // getConditions
+        0,                                            // startByte
+        0,                                            // byteCount
+        0,                                            // putProperties
+        &setSalPropertiesCallback,                    // propertiesCallback
+        &setSalDataCallback,                          // toS3Callback
+        data->salXmlDocumentLen,                      // toS3CallbackTotalSize
+        0,                                            // fromS3Callback
+        &setSalCompleteCallback,                      // completeCallback
+        data,                                         // callbackData
+        timeoutMs                                     // timeoutMs
+    };
+
+    // Perform the request
+    request_perform(&params, requestContext);
+}

+ 207 - 0
libs/libs3/src/simplexml.c

@@ -0,0 +1,207 @@
+/** **************************************************************************
+ * simplexml.c
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <libxml/parser.h>
+#include <string.h>
+#include "simplexml.h"
+
+// Use libxml2 for parsing XML.  XML is severely overused in modern
+// computing.  It is useful for only a very small subset of tasks, but
+// software developers who don't know better and are afraid to go against the
+// grain use it for everything, and in most cases, it is completely
+// inappropriate.  Usually, the document structure is severely under-specified
+// as well, as is the case with S3.  We do our best by just caring about the
+// most important aspects of the S3 "XML document" responses: the elements and
+// their values.  The SAX API (just about the lamest API ever devised and
+// proof that XML sucks - well, the real proof is how crappy all of the XML
+// parsing libraries are, including libxml2 - but I digress) is used here
+// because we don't need much from the parser and SAX is fast and low memory.
+//
+// Note that for simplicity we assume all ASCII here.  No attempts are made to
+// detect non-ASCII sequences in utf-8 and convert them into ASCII in any way.
+// S3 appears to only use ASCII anyway.
+
+
+// Resolve only the five predefined XML entities (&amp; &lt; &gt; etc.);
+// all other entity references are left unresolved.
+static xmlEntityPtr saxGetEntity(void *user_data, const xmlChar *name)
+{
+    (void) user_data;
+    return xmlGetPredefinedEntity(name);
+}
+
+
+// SAX start-element handler: push the element name onto the '/'-separated
+// element path.  Attributes are ignored; names are assumed to be ASCII.
+static void saxStartElement(void *user_data, const xmlChar *nameUtf8,
+                            const xmlChar **attr)
+{
+    SimpleXml *simpleXml = (SimpleXml *) user_data;
+
+    (void) attr;
+
+    // Once an error has been recorded, ignore all further parse events
+    if (simpleXml->status != S3StatusOK) {
+        return;
+    }
+
+    const char *name = (const char *) nameUtf8;
+    int len = (int) strlen(name);
+
+    // Reject documents whose element path would overflow the fixed
+    // buffer (the +1 accounts for the '/' separator)
+    if ((simpleXml->elementPathLen + len + 1) >=
+        (int) sizeof(simpleXml->elementPath)) {
+        simpleXml->status = S3StatusXmlParseFailure;
+        return;
+    }
+
+    if (simpleXml->elementPathLen > 0) {
+        simpleXml->elementPath[simpleXml->elementPathLen++] = '/';
+    }
+    // Copy the name plus its terminating NUL onto the end of the path
+    memcpy(&(simpleXml->elementPath[simpleXml->elementPathLen]),
+           name, len + 1);
+    simpleXml->elementPathLen += len;
+}
+
+
+// SAX end-element handler: notify the client (a callback with 0 data
+// means "element closed"), then pop the last component off the path.
+static void saxEndElement(void *user_data, const xmlChar *name)
+{
+    SimpleXml *simpleXml = (SimpleXml *) user_data;
+
+    (void) name;
+
+    if (simpleXml->status != S3StatusOK) {
+        return;
+    }
+
+    simpleXml->status = (*(simpleXml->callback))
+        (simpleXml->elementPath, 0, 0, simpleXml->callbackData);
+
+    // Back up to the previous '/' separator (or the very beginning) and
+    // truncate the path there
+    int len = simpleXml->elementPathLen;
+    while ((len > 0) && (simpleXml->elementPath[len] != '/')) {
+        len--;
+    }
+    simpleXml->elementPath[len] = 0;
+    simpleXml->elementPathLen = len;
+}
+
+
+// SAX character-data handler: forward the text to the client along with
+// the current element path, unless an error was already recorded.
+static void saxCharacters(void *user_data, const xmlChar *ch, int len)
+{
+    SimpleXml *simpleXml = (SimpleXml *) user_data;
+
+    if (simpleXml->status == S3StatusOK) {
+        simpleXml->status = (*(simpleXml->callback))
+            (simpleXml->elementPath, (const char *) ch, len,
+             simpleXml->callbackData);
+    }
+}
+
+
+static void saxError(void *user_data, const char *msg, ...)
+{
+    (void) msg;
+
+    SimpleXml *simpleXml = (SimpleXml *) user_data;
+
+    if (simpleXml->status != S3StatusOK) {
+        return;
+    }
+
+    simpleXml->status = S3StatusXmlParseFailure;
+}
+
+
+// libxml2 SAX handler table.  Only entity resolution, element start/end,
+// character/CDATA data, and error reporting are hooked; everything else
+// is left null (unused).  Field order must match struct _xmlSAXHandler.
+static struct _xmlSAXHandler saxHandlerG =
+{
+    0, // internalSubsetSAXFunc
+    0, // isStandaloneSAXFunc
+    0, // hasInternalSubsetSAXFunc
+    0, // hasExternalSubsetSAXFunc
+    0, // resolveEntitySAXFunc
+    &saxGetEntity, // getEntitySAXFunc
+    0, // entityDeclSAXFunc
+    0, // notationDeclSAXFunc
+    0, // attributeDeclSAXFunc
+    0, // elementDeclSAXFunc
+    0, // unparsedEntityDeclSAXFunc
+    0, // setDocumentLocatorSAXFunc
+    0, // startDocumentSAXFunc
+    0, // endDocumentSAXFunc
+    &saxStartElement, // startElementSAXFunc
+    &saxEndElement, // endElementSAXFunc
+    0, // referenceSAXFunc
+    &saxCharacters, // charactersSAXFunc
+    0, // ignorableWhitespaceSAXFunc
+    0, // processingInstructionSAXFunc
+    0, // commentSAXFunc
+    0, // warningSAXFunc
+    &saxError, // errorSAXFunc
+    &saxError, // fatalErrorSAXFunc
+    0, // getParameterEntitySAXFunc
+    &saxCharacters, // cdataBlockSAXFunc
+    0, // externalSubsetSAXFunc
+    0, // initialized
+    0, // _private
+    0, // startElementNsSAX2Func
+    0, // endElementNsSAX2Func
+    0 // xmlStructuredErrorFunc serror;
+};
+
+// Prepare a SimpleXml for parsing.  The libxml2 push-parser context is
+// created lazily by the first call to simplexml_add, not here.
+void simplexml_initialize(SimpleXml *simpleXml,
+                          SimpleXmlCallback *callback, void *callbackData)
+{
+    simpleXml->callback = callback;
+    simpleXml->callbackData = callbackData;
+    simpleXml->status = S3StatusOK;
+    simpleXml->elementPathLen = 0;
+    simpleXml->xmlParser = 0;
+}
+
+
+// Release the lazily-created libxml2 parser context, if one was made.
+void simplexml_deinitialize(SimpleXml *simpleXml)
+{
+    if (simpleXml->xmlParser != 0) {
+        xmlFreeParserCtxt(simpleXml->xmlParser);
+    }
+}
+
+
+// Feed a chunk of XML to the parser.  Returns S3StatusInternalError if
+// the parser context cannot be created, S3StatusXmlParseFailure on a hard
+// libxml2 error, otherwise whatever status the SAX callbacks recorded.
+S3Status simplexml_add(SimpleXml *simpleXml, const char *data, int dataLen)
+{
+    // Lazily create the push-parser context on first use
+    if (!simpleXml->xmlParser) {
+        simpleXml->xmlParser = xmlCreatePushParserCtxt
+            (&saxHandlerG, simpleXml, 0, 0, 0);
+        if (!simpleXml->xmlParser) {
+            return S3StatusInternalError;
+        }
+    }
+
+    if (xmlParseChunk((xmlParserCtxtPtr) simpleXml->xmlParser,
+                      data, dataLen, 0)) {
+        return S3StatusXmlParseFailure;
+    }
+
+    return simpleXml->status;
+}

+ 87 - 0
libs/libs3/src/testsimplexml.c

@@ -0,0 +1,87 @@
+/** **************************************************************************
+ * testsimplexml.c
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <time.h>
+#include "simplexml.h"
+
+// Test callback: print each parse event as "[path]: [data]".  End-of-
+// element events arrive with dataLen == 0 and print an empty value.
+static S3Status simpleXmlCallback(const char *elementPath, const char *data,
+                                  int dataLen, void *callbackData)
+{
+    (void) callbackData;
+    printf("[%s]: [%.*s]\n", elementPath, dataLen, data);
+    return S3StatusOK;
+}
+
+
+// The only argument allowed is a specification of the random seed to use
+int main(int argc, char **argv)
+{
+    if (argc > 1) {
+        char *arg = argv[1];
+        int seed = 0;
+        while (*arg) {
+            seed *= 10;
+            seed += (*arg++ - '0');
+        }
+        
+        srand(seed);
+    }
+    else {
+        srand(time(0));
+    }
+
+    SimpleXml simpleXml;
+
+    simplexml_initialize(&simpleXml, &simpleXmlCallback, 0);
+
+    // Read chunks of 10K from stdin, and then feed them in random chunks
+    // to simplexml_add
+    char inbuf[10000];
+
+    int amt_read;
+    while ((amt_read = fread(inbuf, 1, sizeof(inbuf), stdin)) > 0) {
+        char *buf = inbuf;
+        while (amt_read) {
+            int amt = (rand() % amt_read) + 1;
+            S3Status status = simplexml_add(&simpleXml, buf, amt);
+            if (status != S3StatusOK) {
+                fprintf(stderr, "ERROR: Parse failure: %d\n", status);
+                simplexml_deinitialize(&simpleXml);
+                return -1;
+            }
+            buf += amt, amt_read -= amt;
+        }
+    }
+
+    simplexml_deinitialize(&simpleXml);
+
+    return 0;
+}

+ 175 - 0
libs/libs3/src/util.c

@@ -0,0 +1,175 @@
+/** **************************************************************************
+ * util.c
+ * 
+ * Copyright 2008 Bryan Ischo <[email protected]>
+ * 
+ * This file is part of libs3.
+ * 
+ * libs3 is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation, version 3 of the License.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of this library and its programs with the
+ * OpenSSL library, and distribute linked combinations including the two.
+ *
+ * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with libs3, in a file named COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ ************************************************************************** **/
+
+#include <ctype.h>
+#include <string.h>
+#include "util.h"
+
+
// Convenience utility for making the code look nicer.  Tests a string
// against a format; only the characters specified in the format are
// checked (i.e. if the string is longer than the format, the string still
// checks out ok).  Format characters are:
// d - is a digit
// anything else - is that character
// Returns nonzero if the string checks out, zero if it does not.
static int checkString(const char *str, const char *format)
{
    while (*format) {
        if (*format == 'd') {
            // Cast to unsigned char: passing a plain (possibly signed)
            // char with a negative value to isdigit() is undefined
            // behavior
            if (!isdigit((unsigned char) *str)) {
                return 0;
            }
        }
        else if (*str != *format) {
            return 0;
        }
        str++, format++;
    }

    return 1;
}
+
/*
 * Encode rules:
 * 1. Every byte except: 'A'-'Z', 'a'-'z', '0'-'9', '-', '.', '_', and '~'
 * 2. The space must be encoded as "%20" (and not as "+")
 * 3. Letters in the hexadecimal value must be uppercase, for example "%1A"
 * 4. Encode the forward slash character, '/', everywhere except in the object key name
 *
 * Encodes up to maxSrcSize bytes of src into dest, which must be large
 * enough for the worst case (3 bytes per input byte, plus the
 * terminator).  Returns 1 on success; returns 0 (leaving dest
 * terminated at the truncation point) if src exceeds maxSrcSize bytes.
 * A null src is treated as an empty string.
 */
int urlEncode(char *dest, const char *src, int maxSrcSize, int encodeSlash)
{
    static const char *hex = "0123456789ABCDEF";

    int len = 0;

    if (src) while (*src) {
        if (++len > maxSrcSize) {
            *dest = 0;
            return 0;
        }
        unsigned char c = *src;
        // Test the unreserved set with explicit ASCII ranges rather than
        // isalnum(), which is locale-dependent and in a non-"C" locale
        // could leave extra bytes unescaped, breaking AWS's canonical
        // URI encoding (rule 1 above)
        if (((c >= 'A') && (c <= 'Z')) ||
            ((c >= 'a') && (c <= 'z')) ||
            ((c >= '0') && (c <= '9')) ||
            (c == '-') || (c == '_') || (c == '.') ||
            (c == '~') || ((c == '/') && !encodeSlash)) {
            *dest++ = c;
        }
        else {
            *dest++ = '%';
            *dest++ = hex[c >> 4];
            *dest++ = hex[c & 15];
        }
        src++;
    }

    *dest = 0;

    return 1;
}
+
+
// Parses the leading "yyyy-mm-ddThh:mm:ss" portion of an ISO 8601
// timestamp (optionally followed by fractional seconds and a "+hh:mm" /
// "-hh:mm" zone offset) into seconds since the epoch.  Returns -1 if the
// string does not start with a well-formed date-time.
int64_t parseIso8601Time(const char *str)
{
    // Check to make sure that it has a valid format
    if (!checkString(str, "dddd-dd-ddTdd:dd:dd")) {
        return -1;
    }

// Evaluates the two decimal digits at str as a number in [0, 99]
#define nextnum() (((*str - '0') * 10) + (*(str + 1) - '0'))

    // Convert it
    struct tm stm;
    memset(&stm, 0, sizeof(stm));

    // tm_year counts years since 1900: the century pair contributes
    // (cc - 19) * 100, then the low two digits are added
    stm.tm_year = (nextnum() - 19) * 100;
    str += 2;
    stm.tm_year += nextnum();
    str += 3;

    // tm_mon is zero-based (January == 0)
    stm.tm_mon = nextnum() - 1;
    str += 3;

    stm.tm_mday = nextnum();
    str += 3;

    stm.tm_hour = nextnum();
    str += 3;

    stm.tm_min = nextnum();
    str += 3;

    stm.tm_sec = nextnum();
    str += 2;

    // Let mktime decide whether DST applies
    stm.tm_isdst = -1;

    // NOTE(review): mktime() interprets the broken-down time in the
    // local timezone, not UTC; a trailing "Z" (or no zone suffix) thus
    // yields local-time semantics -- confirm callers expect this
    int64_t ret = mktime(&stm);

    // Skip the millis

    if (*str == '.') {
        str++;
        while (isdigit(*str)) {
            str++;
        }
    }

    // Apply an explicit zone offset if present: a time expressed with a
    // negative offset is later in absolute terms, hence the -sign
    if (checkString(str, "-dd:dd") || checkString(str, "+dd:dd")) {
        int sign = (*str++ == '-') ? -1 : 1;
        int hours = nextnum();
        str += 3;
        int minutes = nextnum();
        ret += (-sign * (((hours * 60) + minutes) * 60));
    }
    // Else it should be Z to be a conformant time string, but we just assume
    // that it is rather than enforcing that

    return ret;
}
+
+
// Tests whether a character is horizontal whitespace (space or tab).
// Defined before parseUnsignedInt, which uses it.
int is_blank(char c)
{
    return ((c == ' ') || (c == '\t'));
}


// Parses a non-negative decimal integer from str, skipping any leading
// spaces/tabs.  Parsing stops at the first non-digit character; no
// overflow detection is performed.  Returns 0 if no digits are present.
uint64_t parseUnsignedInt(const char *str)
{
    // Skip whitespace
    while (is_blank(*str)) {
        str++;
    }

    uint64_t ret = 0;

    // Cast to unsigned char: passing a plain (possibly signed) char with
    // a negative value to isdigit() is undefined behavior
    while (isdigit((unsigned char) *str)) {
        ret *= 10;
        ret += (*str++ - '0');
    }

    return ret;
}

+ 105 - 0
libs/libs3/test/badxml_01.xml

@@ -0,0 +1,105 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- each elementxx is 9 characters long, + slash gives 10 characters -->
+<element00>
+<element01>
+<element02>
+<element03>
+<element04>
+<element05>
+<element06>
+<element07>
+<element08>
+<element09>
+<element10>
+<element11>
+<element12>
+<element13>
+<element14>
+<element15>
+<element16>
+<element17>
+<element18>
+<element19>
+<element20>
+<element21>
+<element22>
+<element23>
+<element24>
+<element25>
+<element26>
+<element27>
+<element28>
+<element29>
+<element30>
+<element31>
+<element32>
+<element33>
+<element34>
+<element35>
+<element36>
+<element37>
+<element38>
+<element39>
+<element40>
+<element41>
+<element42>
+<element43>
+<element44>
+<element45>
+<element46>
+<element47>
+<element48>
+<element49>
+<element50xxx>
+Data
+</element50xxx>
+</element49>
+</element48>
+</element47>
+</element46>
+</element45>
+</element44>
+</element43>
+</element42>
+</element41>
+</element40>
+</element39>
+</element38>
+</element37>
+</element36>
+</element35>
+</element34>
+</element33>
+</element32>
+</element31>
+</element30>
+</element29>
+</element28>
+</element27>
+</element26>
+</element25>
+</element24>
+</element23>
+</element22>
+</element21>
+</element20>
+</element19>
+</element18>
+</element17>
+</element16>
+</element15>
+</element14>
+</element13>
+</element12>
+</element11>
+</element10>
+</element09>
+</element08>
+</element07>
+</element06>
+</element05>
+</element04>
+</element03>
+</element02>
+</element01>
+</element00>

+ 7 - 0
libs/libs3/test/goodxml_01.xml

@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+  <Code>NoSuchKey</Code>
+  <Message> The resource <![CDATA[<now> & then]]> you requested does not exist &amp; so there  </Message>
+  <Resource>/mybucket/myfoto.jpg</Resource> 
+  <RequestId>4442587FB7D0A2F9</RequestId>
+</Error>

+ 105 - 0
libs/libs3/test/goodxml_02.xml

@@ -0,0 +1,105 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- each elementxx is 9 characters long, + slash gives 10 characters -->
+<element00>
+<element01>
+<element02>
+<element03>
+<element04>
+<element05>
+<element06>
+<element07>
+<element08>
+<element09>
+<element10>
+<element11>
+<element12>
+<element13>
+<element14>
+<element15>
+<element16>
+<element17>
+<element18>
+<element19>
+<element20>
+<element21>
+<element22>
+<element23>
+<element24>
+<element25>
+<element26>
+<element27>
+<element28>
+<element29>
+<element30>
+<element31>
+<element32>
+<element33>
+<element34>
+<element35>
+<element36>
+<element37>
+<element38>
+<element39>
+<element40>
+<element41>
+<element42>
+<element43>
+<element44>
+<element45>
+<element46>
+<element47>
+<element48>
+<element49>
+<element50xx>
+Data
+</element50xx>
+</element49>
+</element48>
+</element47>
+</element46>
+</element45>
+</element44>
+</element43>
+</element42>
+</element41>
+</element40>
+</element39>
+</element38>
+</element37>
+</element36>
+</element35>
+</element34>
+</element33>
+</element32>
+</element31>
+</element30>
+</element29>
+</element28>
+</element27>
+</element26>
+</element25>
+</element24>
+</element23>
+</element22>
+</element21>
+</element20>
+</element19>
+</element18>
+</element17>
+</element16>
+</element15>
+</element14>
+</element13>
+</element12>
+</element11>
+</element10>
+</element09>
+</element08>
+</element07>
+</element06>
+</element05>
+</element04>
+</element03>
+</element02>
+</element01>
+</element00>

A diferenza do arquivo foi suprimida porque é demasiado grande
+ 2 - 0
libs/libs3/test/goodxml_03.xml


+ 233 - 0
libs/libs3/test/test.sh

@@ -0,0 +1,233 @@
#!/bin/sh

# End-to-end smoke test for the "s3" command-line tool.  Exercises bucket
# create/list/test/delete, object put/get/copy/delete, bucket and object
# ACL round-trips, and a >15MB multipart upload, counting every step that
# exits with an unexpected status as a failure.
#
# Environment:
# S3_ACCESS_KEY_ID - must be set to S3 Access Key ID
# S3_SECRET_ACCESS_KEY - must be set to S3 Secret Access Key
# TEST_BUCKET_PREFIX - must be set to the test bucket prefix to use
# S3_COMMAND - may be set to s3 command to use, examples:
#              "valgrind s3"
#              "s3 -h" (for aws s3)
#              default: "s3"

# Exit statuses must be in 0-255; "exit -1" is invalid in POSIX sh.
if [ -z "$S3_ACCESS_KEY_ID" ]; then
    echo "S3_ACCESS_KEY_ID required"
    exit 1
fi

if [ -z "$S3_SECRET_ACCESS_KEY" ]; then
    echo "S3_SECRET_ACCESS_KEY required"
    exit 1
fi

if [ -z "$TEST_BUCKET_PREFIX" ]; then
    echo "TEST_BUCKET_PREFIX required"
    exit 1
fi

if [ -z "$S3_COMMAND" ]; then
    S3_COMMAND=s3
fi

failures=0

# check_status <status> [expected] - count a failure unless <status>
# equals [expected] (default 0).  Call with "$?" immediately after the
# command under test.
check_status() {
    if [ "$1" -ne "${2:-0}" ]; then
        failures=$((failures + 1))
    fi
}

# run <args...> - echo, then run, "$S3_COMMAND <args...>", counting a
# failure unless it exits 0.  $S3_COMMAND is deliberately unquoted so
# multi-word values like "valgrind s3" keep working.  Steps that need a
# pipeline, a redirection, or a non-zero expected status are written out
# longhand below.
run() {
    echo "$S3_COMMAND $*"
    $S3_COMMAND "$@"
    check_status $?
}

TEST_BUCKET="${TEST_BUCKET_PREFIX}.testbucket"

# Create the test bucket
run create "$TEST_BUCKET"

# List to find it
echo "$S3_COMMAND list | grep $TEST_BUCKET"
$S3_COMMAND list | grep "$TEST_BUCKET"
check_status $?

# Test it
run test "$TEST_BUCKET"

# List to ensure that it is empty
run list "$TEST_BUCKET"

# Put some data
rm -f seqdata
seq 1 10000 > seqdata
run put "$TEST_BUCKET/testkey" filename=seqdata noStatus=1

rm -f testkey
# Get the data and make sure that it matches
run get "$TEST_BUCKET/testkey" filename=testkey
diff seqdata testkey
check_status $?
rm -f seqdata testkey

# Delete the file
run delete "$TEST_BUCKET/testkey"

# Remove the test bucket
run delete "$TEST_BUCKET"

# Make sure it's not there; grep exiting 1 (no match) is the success case
echo "$S3_COMMAND list | grep $TEST_BUCKET"
$S3_COMMAND list | grep "$TEST_BUCKET"
check_status $? 1

# Now create it again
run create "$TEST_BUCKET"

# Put 10 files in it
for i in 0 1 2 3 4 5 6 7 8 9; do
    echo "echo \"Hello\" | $S3_COMMAND put $TEST_BUCKET/key_$i"
    echo "Hello" | $S3_COMMAND put "$TEST_BUCKET/key_$i"
    check_status $?
done

# List with all details
run list "$TEST_BUCKET"

COPY_BUCKET="${TEST_BUCKET_PREFIX}.copybucket"

# Create another test bucket and copy a file into it
run create "$COPY_BUCKET"
run copy "$TEST_BUCKET/key_5" "$COPY_BUCKET/copykey"

# List the copy bucket
run list "$COPY_BUCKET"

# Compare the files
rm -f key_5 copykey
run get "$TEST_BUCKET/key_5" filename=key_5
run get "$COPY_BUCKET/copykey" filename=copykey
diff key_5 copykey
check_status $?
rm -f key_5 copykey

# Delete the files
for i in 0 1 2 3 4 5 6 7 8 9; do
    run delete "$TEST_BUCKET/key_$i"
done
run delete "$COPY_BUCKET/copykey"

# Delete the copy bucket
run delete "$COPY_BUCKET"

# Now create a new zero-length file
echo "$S3_COMMAND put $TEST_BUCKET/aclkey < /dev/null"
$S3_COMMAND put "$TEST_BUCKET/aclkey" < /dev/null
check_status $?

# Get the bucket acl
rm -f acl
run getacl "$TEST_BUCKET" filename=acl

# Add READ for all AWS users, and READ_ACP for everyone.  The trailing
# whitespace in the appended lines is significant: it matches the
# fixed-width columns that getacl emits and setacl parses.
cat <<EOF >> acl
Group   Authenticated AWS Users                                                                     READ        
EOF
cat <<EOF >> acl
Group   All Users                                                                                   READ_ACP    
EOF
run setacl "$TEST_BUCKET" filename=acl

# Test to make sure that it worked
rm -f acl_new
run getacl "$TEST_BUCKET" filename=acl_new
diff -B acl acl_new
check_status $?
rm -f acl acl_new

# Get the key acl
rm -f acl
run getacl "$TEST_BUCKET/aclkey" filename=acl

# Add READ for all AWS users, and READ_ACP for everyone
cat <<EOF >> acl
Group   Authenticated AWS Users                                                                     READ        
EOF
cat <<EOF >> acl
Group   All Users                                                                                   READ_ACP    
EOF
run setacl "$TEST_BUCKET/aclkey" filename=acl

# Test to make sure that it worked
rm -f acl_new
run getacl "$TEST_BUCKET/aclkey" filename=acl_new
diff -B acl acl_new
check_status $?
rm -f acl acl_new

# Check multipart file upload (>15MB)
dd if=/dev/zero of=mpfile bs=1024k count=30
run put "$TEST_BUCKET/mpfile" filename=mpfile
run get "$TEST_BUCKET/mpfile" filename=mpfile.get
diff mpfile mpfile.get
check_status $?
rm -f mpfile mpfile.get

# Remove the test files and the bucket itself
run delete "$TEST_BUCKET/mpfile"
run delete "$TEST_BUCKET/aclkey"
run delete "$TEST_BUCKET"

if [ "$failures" -eq 0 ]; then
    echo "all tests completed successfully"
else
    echo "tests completed with ${failures} failures"
fi

exit "$failures"
Algúns arquivos non se mostraron porque demasiados arquivos cambiaron neste cambio