
Merge pull request #14 from orchardup/vendor-docker-py

Vendor docker-py
Ben Firshman 11 years ago
parent
commit b4c905dc83

+ 1 - 1
fig/cli/command.py

@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from __future__ import absolute_import
-from docker import Client
+from ..packages.docker import Client
 import errno
 import logging
 import os

+ 1 - 1
fig/cli/main.py

@@ -15,7 +15,7 @@ from .formatter import Formatter
 from .log_printer import LogPrinter
 from .utils import yesno
 
-from docker.client import APIError
+from ..packages.docker.client import APIError
 from .errors import UserError
 from .docopt_command import NoSuchCommand
 from .socketclient import SocketClient

+ 0 - 0
fig/packages/__init__.py


+ 15 - 0
fig/packages/docker/__init__.py

@@ -0,0 +1,15 @@
+# Copyright 2013 dotCloud inc.
+
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+
+#        http://www.apache.org/licenses/LICENSE-2.0
+
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+from .client import Client, APIError  # flake8: noqa
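
The vendored package re-exports the same top-level names as upstream docker-py, so the rest of fig only has to change the import path. A minimal usage sketch, assuming a Docker daemon on the default socket (the defaults come from client.py further down; the error handling is illustrative, not part of this commit):

    from fig.packages.docker import Client, APIError

    client = Client()  # defaults to unix://var/run/docker.sock, API version 1.6
    try:
        print(client.version())
    except APIError as e:
        print('Docker API error: %s' % e)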

+ 7 - 0
fig/packages/docker/auth/__init__.py

@@ -0,0 +1,7 @@
+from .auth import (
+    INDEX_URL,
+    encode_header,
+    load_config,
+    resolve_authconfig,
+    resolve_repository_name
+)  # flake8: noqa

+ 153 - 0
fig/packages/docker/auth/auth.py

@@ -0,0 +1,153 @@
+# Copyright 2013 dotCloud inc.
+
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+
+#        http://www.apache.org/licenses/LICENSE-2.0
+
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+import base64
+import fileinput
+import json
+import os
+
+import six
+
+from ..utils import utils
+
+INDEX_URL = 'https://index.docker.io/v1/'
+DOCKER_CONFIG_FILENAME = '.dockercfg'
+
+
+def swap_protocol(url):
+    if url.startswith('http://'):
+        return url.replace('http://', 'https://', 1)
+    if url.startswith('https://'):
+        return url.replace('https://', 'http://', 1)
+    return url
+
+
+def expand_registry_url(hostname):
+    if hostname.startswith('http:') or hostname.startswith('https:'):
+        if '/' not in hostname[9:]:
+            hostname = hostname + '/v1/'
+        return hostname
+    if utils.ping('https://' + hostname + '/v1/_ping'):
+        return 'https://' + hostname + '/v1/'
+    return 'http://' + hostname + '/v1/'
+
+
+def resolve_repository_name(repo_name):
+    if '://' in repo_name:
+        raise ValueError('Repository name cannot contain a '
+                         'scheme ({0})'.format(repo_name))
+    parts = repo_name.split('/', 1)
+    if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':
+        # This is a docker index repo (ex: foo/bar or ubuntu)
+        return INDEX_URL, repo_name
+    if len(parts) < 2:
+        raise ValueError('Invalid repository name ({0})'.format(repo_name))
+
+    if 'index.docker.io' in parts[0]:
+        raise ValueError('Invalid repository name, '
+                         'try "{0}" instead'.format(parts[1]))
+
+    return expand_registry_url(parts[0]), parts[1]
+
+
+def resolve_authconfig(authconfig, registry=None):
+    """Return the authentication data from the given auth configuration for a
+    specific registry. We'll do our best to infer the correct URL for the
+    registry, trying both http and https schemes. Returns an empty dictionary
+    if no data exists."""
+    # Default to the public index server
+    registry = registry or INDEX_URL
+
+    # If it's not the index server, there are three cases:
+    #
+    # 1. this is a full config url -> it should be used as is
+    # 2. it could be a full url, but with the wrong protocol
+    # 3. it can be the hostname optionally with a port
+    #
+    # As only one auth entry is fully qualified, we need to start
+    # parsing and matching.
+    if '/' not in registry:
+        registry = registry + '/v1/'
+    if not registry.startswith('http:') and not registry.startswith('https:'):
+        registry = 'https://' + registry
+
+    if registry in authconfig:
+        return authconfig[registry]
+    return authconfig.get(swap_protocol(registry), None)
+
+
+def decode_auth(auth):
+    if isinstance(auth, six.string_types):
+        auth = auth.encode('ascii')
+    s = base64.b64decode(auth)
+    login, pwd = s.split(b':')
+    return login.decode('ascii'), pwd.decode('ascii')
+
+
+def encode_header(auth):
+    auth_json = json.dumps(auth).encode('ascii')
+    return base64.b64encode(auth_json)
+
+
+def load_config(root=None):
+    """Loads authentication data from a Docker configuration file in the given
+    root directory."""
+    conf = {}
+    data = None
+
+    config_file = os.path.join(root or os.environ.get('HOME', '.'),
+                               DOCKER_CONFIG_FILENAME)
+
+    # First try as JSON
+    try:
+        with open(config_file) as f:
+            conf = {}
+            for registry, entry in six.iteritems(json.load(f)):
+                username, password = decode_auth(entry['auth'])
+                conf[registry] = {
+                    'username': username,
+                    'password': password,
+                    'email': entry['email'],
+                    'serveraddress': registry,
+                }
+            return conf
+    except:
+        pass
+
+    # If that fails, we assume the configuration file contains a single
+    # authentication token for the public registry in the following format:
+    #
+    # auth = AUTH_TOKEN
+    # email = [email protected]
+    try:
+        data = []
+        for line in fileinput.input(config_file):
+            data.append(line.strip().split(' = ')[1])
+        if len(data) < 2:
+            # Not enough data
+            raise Exception('Invalid or empty configuration file!')
+
+        username, password = decode_auth(data[0])
+        conf[INDEX_URL] = {
+            'username': username,
+            'password': password,
+            'email': data[1],
+            'serveraddress': INDEX_URL,
+        }
+        return conf
+    except:
+        pass
+
+    # If all fails, return an empty config
+    return {}
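
A rough sketch of how the name-resolution helpers above behave (the public-index cases follow directly from the code; the private-registry case needs network access, so it is shown as a comment):

    from fig.packages.docker.auth import INDEX_URL, resolve_repository_name

    # Bare names and "user/repo" names resolve to the public index:
    assert resolve_repository_name('ubuntu') == (INDEX_URL, 'ubuntu')
    assert resolve_repository_name('orchardup/fig') == (INDEX_URL, 'orchardup/fig')

    # A first component containing a dot or colon (or "localhost") is treated
    # as a private registry; expand_registry_url() probes https:// and falls
    # back to http://. Assuming the registry answers over HTTPS:
    # resolve_repository_name('registry.example.com/foo')
    #   -> ('https://registry.example.com/v1/', 'foo')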

+ 746 - 0
fig/packages/docker/client.py

@@ -0,0 +1,746 @@
+# Copyright 2013 dotCloud inc.
+
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+
+#        http://www.apache.org/licenses/LICENSE-2.0
+
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+import json
+import re
+import shlex
+import struct
+
+import requests
+import requests.exceptions
+import six
+
+from .auth import auth
+from .unixconn import unixconn
+from .utils import utils
+
+if not six.PY3:
+    import websocket
+
+DEFAULT_TIMEOUT_SECONDS = 60
+STREAM_HEADER_SIZE_BYTES = 8
+
+
+class APIError(requests.exceptions.HTTPError):
+    def __init__(self, message, response, explanation=None):
+        super(APIError, self).__init__(message, response=response)
+
+        self.explanation = explanation
+
+        if self.explanation is None and response.content:
+            self.explanation = response.content.strip()
+
+    def __str__(self):
+        message = super(APIError, self).__str__()
+
+        if self.is_client_error():
+            message = '%s Client Error: %s' % (
+                self.response.status_code, self.response.reason)
+
+        elif self.is_server_error():
+            message = '%s Server Error: %s' % (
+                self.response.status_code, self.response.reason)
+
+        if self.explanation:
+            message = '%s ("%s")' % (message, self.explanation)
+
+        return message
+
+    def is_client_error(self):
+        return 400 <= self.response.status_code < 500
+
+    def is_server_error(self):
+        return 500 <= self.response.status_code < 600
+
+
+class Client(requests.Session):
+    def __init__(self, base_url=None, version="1.6",
+                 timeout=DEFAULT_TIMEOUT_SECONDS):
+        super(Client, self).__init__()
+        if base_url is None:
+            base_url = "unix://var/run/docker.sock"
+        if base_url.startswith('unix:///'):
+            base_url = base_url.replace('unix:/', 'unix:')
+        if base_url.startswith('tcp:'):
+            base_url = base_url.replace('tcp:', 'http:')
+        if base_url.endswith('/'):
+            base_url = base_url[:-1]
+        self.base_url = base_url
+        self._version = version
+        self._timeout = timeout
+        self._auth_configs = auth.load_config()
+
+        self.mount('unix://', unixconn.UnixAdapter(base_url, timeout))
+
+    def _set_request_timeout(self, kwargs):
+        """Prepare the kwargs for an HTTP request by inserting the timeout
+        parameter, if not already present."""
+        kwargs.setdefault('timeout', self._timeout)
+        return kwargs
+
+    def _post(self, url, **kwargs):
+        return self.post(url, **self._set_request_timeout(kwargs))
+
+    def _get(self, url, **kwargs):
+        return self.get(url, **self._set_request_timeout(kwargs))
+
+    def _delete(self, url, **kwargs):
+        return self.delete(url, **self._set_request_timeout(kwargs))
+
+    def _url(self, path):
+        return '{0}/v{1}{2}'.format(self.base_url, self._version, path)
+
+    def _raise_for_status(self, response, explanation=None):
+        """Raises stored :class:`APIError`, if one occurred."""
+        try:
+            response.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            raise APIError(e, response, explanation=explanation)
+
+    def _result(self, response, json=False, binary=False):
+        assert not (json and binary)
+        self._raise_for_status(response)
+
+        if json:
+            return response.json()
+        if binary:
+            return response.content
+        return response.text
+
+    def _container_config(self, image, command, hostname=None, user=None,
+                          detach=False, stdin_open=False, tty=False,
+                          mem_limit=0, ports=None, environment=None, dns=None,
+                          volumes=None, volumes_from=None,
+                          network_disabled=False):
+        if isinstance(command, six.string_types):
+            command = shlex.split(str(command))
+        if isinstance(environment, dict):
+            environment = [
+                '{0}={1}'.format(k, v) for k, v in environment.items()
+            ]
+
+        if ports and isinstance(ports, list):
+            exposed_ports = {}
+            for port_definition in ports:
+                port = port_definition
+                proto = None
+                if isinstance(port_definition, tuple):
+                    if len(port_definition) == 2:
+                        proto = port_definition[1]
+                    port = port_definition[0]
+                exposed_ports['{0}{1}'.format(
+                    port,
+                    '/' + proto if proto else ''
+                )] = {}
+            ports = exposed_ports
+
+        if volumes and isinstance(volumes, list):
+            volumes_dict = {}
+            for vol in volumes:
+                volumes_dict[vol] = {}
+            volumes = volumes_dict
+
+        attach_stdin = False
+        attach_stdout = False
+        attach_stderr = False
+
+        if not detach:
+            attach_stdout = True
+            attach_stderr = True
+
+            if stdin_open:
+                attach_stdin = True
+
+        return {
+            'Hostname':     hostname,
+            'ExposedPorts': ports,
+            'User':         user,
+            'Tty':          tty,
+            'OpenStdin':    stdin_open,
+            'Memory':       mem_limit,
+            'AttachStdin':  attach_stdin,
+            'AttachStdout': attach_stdout,
+            'AttachStderr': attach_stderr,
+            'Env':          environment,
+            'Cmd':          command,
+            'Dns':          dns,
+            'Image':        image,
+            'Volumes':      volumes,
+            'VolumesFrom':  volumes_from,
+            'NetworkDisabled': network_disabled
+        }
+
+    def _post_json(self, url, data, **kwargs):
+        # Go <1.1 can't deserialize null to a string
+        # so we do this disgusting thing here.
+        data2 = {}
+        if data is not None:
+            for k, v in six.iteritems(data):
+                if v is not None:
+                    data2[k] = v
+
+        if 'headers' not in kwargs:
+            kwargs['headers'] = {}
+        kwargs['headers']['Content-Type'] = 'application/json'
+        return self._post(url, data=json.dumps(data2), **kwargs)
+
+    def _attach_params(self, override=None):
+        return override or {
+            'stdout': 1,
+            'stderr': 1,
+            'stream': 1
+        }
+
+    def _attach_websocket(self, container, params=None):
+        if six.PY3:
+            raise NotImplementedError("This method is not currently supported "
+                                      "under python 3")
+        url = self._url("/containers/{0}/attach/ws".format(container))
+        req = requests.Request("POST", url, params=self._attach_params(params))
+        full_url = req.prepare().url
+        full_url = full_url.replace("http://", "ws://", 1)
+        full_url = full_url.replace("https://", "wss://", 1)
+        return self._create_websocket_connection(full_url)
+
+    def _create_websocket_connection(self, url):
+        return websocket.create_connection(url)
+
+    def _stream_result(self, response):
+        """Generator for straight-out, non chunked-encoded HTTP responses."""
+        self._raise_for_status(response)
+        for line in response.iter_lines(chunk_size=1):
+            # filter out keep-alive new lines
+            if line:
+                yield line + '\n'
+
+    def _stream_result_socket(self, response):
+        self._raise_for_status(response)
+        return response.raw._fp.fp._sock
+
+    def _stream_helper(self, response):
+        """Generator for data coming from a chunked-encoded HTTP response."""
+        socket_fp = self._stream_result_socket(response)
+        socket_fp.setblocking(1)
+        socket = socket_fp.makefile()
+        while True:
+            size = int(socket.readline(), 16)
+            if size <= 0:
+                break
+            data = socket.readline()
+            if not data:
+                break
+            yield data
+
+    def _multiplexed_buffer_helper(self, response):
+        """A generator of multiplexed data blocks read from a buffered
+        response."""
+        buf = self._result(response, binary=True)
+        walker = 0
+        while True:
+            if len(buf[walker:]) < 8:
+                break
+            _, length = struct.unpack_from('>BxxxL', buf[walker:])
+            start = walker + STREAM_HEADER_SIZE_BYTES
+            end = start + length
+            walker = end
+            yield str(buf[start:end])
+
+    def _multiplexed_socket_stream_helper(self, response):
+        """A generator of multiplexed data blocks coming from a response
+        socket."""
+        socket = self._stream_result_socket(response)
+
+        def recvall(socket, size):
+            data = ''
+            while size > 0:
+                block = socket.recv(size)
+                if not block:
+                    return None
+
+                data += block
+                size -= len(block)
+            return data
+
+        while True:
+            socket.settimeout(None)
+            header = recvall(socket, STREAM_HEADER_SIZE_BYTES)
+            if not header:
+                break
+            _, length = struct.unpack('>BxxxL', header)
+            if not length:
+                break
+            data = recvall(socket, length)
+            if not data:
+                break
+            yield data
+
+    def attach(self, container, stdout=True, stderr=True,
+               stream=False, logs=False):
+        if isinstance(container, dict):
+            container = container.get('Id')
+        params = {
+            'logs': logs and 1 or 0,
+            'stdout': stdout and 1 or 0,
+            'stderr': stderr and 1 or 0,
+            'stream': stream and 1 or 0,
+        }
+        u = self._url("/containers/{0}/attach".format(container))
+        response = self._post(u, params=params, stream=stream)
+
+        # Stream multiplexing was introduced in API v1.6.
+        if utils.compare_version('1.6', self._version) < 0:
+            return stream and self._stream_result(response) or \
+                self._result(response, binary=True)
+
+        return stream and self._multiplexed_socket_stream_helper(response) or \
+            ''.join([x for x in self._multiplexed_buffer_helper(response)])
+
+    def attach_socket(self, container, params=None, ws=False):
+        if params is None:
+            params = {
+                'stdout': 1,
+                'stderr': 1,
+                'stream': 1
+            }
+        if ws:
+            return self._attach_websocket(container, params)
+
+        if isinstance(container, dict):
+            container = container.get('Id')
+        u = self._url("/containers/{0}/attach".format(container))
+        return self._stream_result_socket(self.post(
+            u, None, params=self._attach_params(params), stream=True))
+
+    def build(self, path=None, tag=None, quiet=False, fileobj=None,
+              nocache=False, rm=False, stream=False, timeout=None):
+        remote = context = headers = None
+        if path is None and fileobj is None:
+            raise Exception("Either path or fileobj needs to be provided.")
+
+        if fileobj is not None:
+            context = utils.mkbuildcontext(fileobj)
+        elif path.startswith(('http://', 'https://', 'git://', 'github.com/')):
+            remote = path
+        else:
+            context = utils.tar(path)
+
+        u = self._url('/build')
+        params = {
+            't': tag,
+            'remote': remote,
+            'q': quiet,
+            'nocache': nocache,
+            'rm': rm
+        }
+        if context is not None:
+            headers = {'Content-Type': 'application/tar'}
+
+        response = self._post(
+            u,
+            data=context,
+            params=params,
+            headers=headers,
+            stream=stream,
+            timeout=timeout,
+        )
+
+        if context is not None:
+            context.close()
+        if stream:
+            return self._stream_result(response)
+        else:
+            output = self._result(response)
+            srch = r'Successfully built ([0-9a-f]+)'
+            match = re.search(srch, output)
+            if not match:
+                return None, output
+            return match.group(1), output
+
+    def commit(self, container, repository=None, tag=None, message=None,
+               author=None, conf=None):
+        params = {
+            'container': container,
+            'repo': repository,
+            'tag': tag,
+            'comment': message,
+            'author': author
+        }
+        u = self._url("/commit")
+        return self._result(self._post_json(u, data=conf, params=params),
+                            json=True)
+
+    def containers(self, quiet=False, all=False, trunc=True, latest=False,
+                   since=None, before=None, limit=-1):
+        params = {
+            'limit': 1 if latest else limit,
+            'all': 1 if all else 0,
+            'trunc_cmd': 1 if trunc else 0,
+            'since': since,
+            'before': before
+        }
+        u = self._url("/containers/json")
+        res = self._result(self._get(u, params=params), True)
+
+        if quiet:
+            return [{'Id': x['Id']} for x in res]
+        return res
+
+    def copy(self, container, resource):
+        res = self._post_json(
+            self._url("/containers/{0}/copy".format(container)),
+            data={"Resource": resource},
+            stream=True
+        )
+        self._raise_for_status(res)
+        return res.raw
+
+    def create_container(self, image, command=None, hostname=None, user=None,
+                         detach=False, stdin_open=False, tty=False,
+                         mem_limit=0, ports=None, environment=None, dns=None,
+                         volumes=None, volumes_from=None,
+                         network_disabled=False, name=None):
+
+        config = self._container_config(
+            image, command, hostname, user, detach, stdin_open, tty, mem_limit,
+            ports, environment, dns, volumes, volumes_from, network_disabled
+        )
+        return self.create_container_from_config(config, name)
+
+    def create_container_from_config(self, config, name=None):
+        u = self._url("/containers/create")
+        params = {
+            'name': name
+        }
+        res = self._post_json(u, data=config, params=params)
+        return self._result(res, True)
+
+    def diff(self, container):
+        if isinstance(container, dict):
+            container = container.get('Id')
+        return self._result(self._get(self._url("/containers/{0}/changes".
+                            format(container))), True)
+
+    def events(self):
+        u = self._url("/events")
+
+        socket = self._stream_result_socket(self.get(u, stream=True))
+
+        while True:
+            chunk = socket.recv(4096)
+            if chunk:
+                # Messages come in the format of length, data, newline.
+                length, data = chunk.split("\n", 1)
+                length = int(length, 16)
+                if length > len(data):
+                    data += socket.recv(length - len(data))
+                yield json.loads(data)
+            else:
+                break
+
+    def export(self, container):
+        if isinstance(container, dict):
+            container = container.get('Id')
+        res = self._get(self._url("/containers/{0}/export".format(container)),
+                        stream=True)
+        self._raise_for_status(res)
+        return res.raw
+
+    def history(self, image):
+        res = self._get(self._url("/images/{0}/history".format(image)))
+        self._raise_for_status(res)
+        return self._result(res)
+
+    def images(self, name=None, quiet=False, all=False, viz=False):
+        if viz:
+            return self._result(self._get(self._url("images/viz")))
+        params = {
+            'filter': name,
+            'only_ids': 1 if quiet else 0,
+            'all': 1 if all else 0,
+        }
+        res = self._result(self._get(self._url("/images/json"), params=params),
+                           True)
+        if quiet:
+            return [x['Id'] for x in res]
+        return res
+
+    def import_image(self, src, data=None, repository=None, tag=None):
+        u = self._url("/images/create")
+        params = {
+            'repo': repository,
+            'tag': tag
+        }
+        try:
+            # XXX: this is far from optimal, but it is currently the only
+            # way to import tarballs through the API
+            fic = open(src)
+            data = fic.read()
+            fic.close()
+            src = "-"
+        except IOError:
+            # file does not exist or is not a file (treat src as a URL)
+            data = None
+        if isinstance(src, six.string_types):
+            params['fromSrc'] = src
+            return self._result(self._post(u, data=data, params=params))
+
+        return self._result(self._post(u, data=src, params=params))
+
+    def info(self):
+        return self._result(self._get(self._url("/info")),
+                            True)
+
+    def insert(self, image, url, path):
+        api_url = self._url("/images/" + image + "/insert")
+        params = {
+            'url': url,
+            'path': path
+        }
+        return self._result(self._post(api_url, params=params))
+
+    def inspect_container(self, container):
+        if isinstance(container, dict):
+            container = container.get('Id')
+        return self._result(
+            self._get(self._url("/containers/{0}/json".format(container))),
+            True)
+
+    def inspect_image(self, image_id):
+        return self._result(
+            self._get(self._url("/images/{0}/json".format(image_id))),
+            True
+        )
+
+    def kill(self, container, signal=None):
+        if isinstance(container, dict):
+            container = container.get('Id')
+        url = self._url("/containers/{0}/kill".format(container))
+        params = {}
+        if signal is not None:
+            params['signal'] = signal
+        res = self._post(url, params=params)
+
+        self._raise_for_status(res)
+
+    def login(self, username, password=None, email=None, registry=None,
+              reauth=False):
+        # If we don't have any auth data so far, try reloading the config file
+        # one more time in case anything showed up in there.
+        if not self._auth_configs:
+            self._auth_configs = auth.load_config()
+
+        registry = registry or auth.INDEX_URL
+
+        authcfg = auth.resolve_authconfig(self._auth_configs, registry)
+        # If we found an existing auth config for this registry and username
+        # combination, we can return it immediately unless reauth is requested.
+        if authcfg and authcfg.get('username', None) == username \
+                and not reauth:
+            return authcfg
+
+        req_data = {
+            'username': username,
+            'password': password,
+            'email': email,
+            'serveraddress': registry,
+        }
+
+        response = self._post_json(self._url('/auth'), data=req_data)
+        if response.status_code == 200:
+            self._auth_configs[registry] = req_data
+        return self._result(response, json=True)
+
+    def logs(self, container, stdout=True, stderr=True, stream=False):
+        return self.attach(
+            container,
+            stdout=stdout,
+            stderr=stderr,
+            stream=stream,
+            logs=True
+        )
+
+    def port(self, container, private_port):
+        if isinstance(container, dict):
+            container = container.get('Id')
+        res = self._get(self._url("/containers/{0}/json".format(container)))
+        self._raise_for_status(res)
+        json_ = res.json()
+        s_port = str(private_port)
+        f_port = None
+        if s_port in json_['NetworkSettings']['PortMapping']['Udp']:
+            f_port = json_['NetworkSettings']['PortMapping']['Udp'][s_port]
+        elif s_port in json_['NetworkSettings']['PortMapping']['Tcp']:
+            f_port = json_['NetworkSettings']['PortMapping']['Tcp'][s_port]
+
+        return f_port
+
+    def pull(self, repository, tag=None, stream=False):
+        registry, repo_name = auth.resolve_repository_name(repository)
+        if repo_name.count(":") == 1:
+            repository, tag = repository.rsplit(":", 1)
+
+        params = {
+            'tag': tag,
+            'fromImage': repository
+        }
+        headers = {}
+
+        if utils.compare_version('1.5', self._version) >= 0:
+            # If we don't have any auth data so far, try reloading the config
+            # file one more time in case anything showed up in there.
+            if not self._auth_configs:
+                self._auth_configs = auth.load_config()
+            authcfg = auth.resolve_authconfig(self._auth_configs, registry)
+
+            # Do not fail here if no authentication exists for this specific
+            # registry, as we can have a read-only pull. Just put the header if
+            # we can.
+            if authcfg:
+                headers['X-Registry-Auth'] = auth.encode_header(authcfg)
+
+        response = self._post(self._url('/images/create'), params=params,
+                              headers=headers, stream=stream, timeout=None)
+
+        if stream:
+            return self._stream_helper(response)
+        else:
+            return self._result(response)
+
+    def push(self, repository, stream=False):
+        registry, repo_name = auth.resolve_repository_name(repository)
+        u = self._url("/images/{0}/push".format(repository))
+        headers = {}
+
+        if utils.compare_version('1.5', self._version) >= 0:
+            # If we don't have any auth data so far, try reloading the config
+            # file one more time in case anything showed up in there.
+            if not self._auth_configs:
+                self._auth_configs = auth.load_config()
+            authcfg = auth.resolve_authconfig(self._auth_configs, registry)
+
+            # Do not fail here if no authentication exists for this specific
+            # registry, as we can have a read-only pull. Just put the header if
+            # we can.
+            if authcfg:
+                headers['X-Registry-Auth'] = auth.encode_header(authcfg)
+
+            response = self._post_json(u, None, headers=headers, stream=stream)
+        else:
+            response = self._post_json(u, authcfg, stream=stream)
+
+        return stream and self._stream_helper(response) \
+            or self._result(response)
+
+    def remove_container(self, container, v=False, link=False):
+        if isinstance(container, dict):
+            container = container.get('Id')
+        params = {'v': v, 'link': link}
+        res = self._delete(self._url("/containers/" + container),
+                           params=params)
+        self._raise_for_status(res)
+
+    def remove_image(self, image):
+        res = self._delete(self._url("/images/" + image))
+        self._raise_for_status(res)
+
+    def restart(self, container, timeout=10):
+        if isinstance(container, dict):
+            container = container.get('Id')
+        params = {'t': timeout}
+        url = self._url("/containers/{0}/restart".format(container))
+        res = self._post(url, params=params)
+        self._raise_for_status(res)
+
+    def search(self, term):
+        return self._result(self._get(self._url("/images/search"),
+                                      params={'term': term}),
+                            True)
+
+    def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
+              publish_all_ports=False, links=None, privileged=False):
+        if isinstance(container, dict):
+            container = container.get('Id')
+
+        if isinstance(lxc_conf, dict):
+            formatted = []
+            for k, v in six.iteritems(lxc_conf):
+                formatted.append({'Key': k, 'Value': str(v)})
+            lxc_conf = formatted
+
+        start_config = {
+            'LxcConf': lxc_conf
+        }
+        if binds:
+            bind_pairs = [
+                '{0}:{1}'.format(host, dest) for host, dest in binds.items()
+            ]
+            start_config['Binds'] = bind_pairs
+
+        if port_bindings:
+            start_config['PortBindings'] = utils.convert_port_bindings(
+                port_bindings
+            )
+
+        start_config['PublishAllPorts'] = publish_all_ports
+
+        if links:
+            formatted_links = [
+                '{0}:{1}'.format(k, v) for k, v in sorted(six.iteritems(links))
+            ]
+
+            start_config['Links'] = formatted_links
+
+        start_config['Privileged'] = privileged
+
+        url = self._url("/containers/{0}/start".format(container))
+        res = self._post_json(url, data=start_config)
+        self._raise_for_status(res)
+
+    def stop(self, container, timeout=10):
+        if isinstance(container, dict):
+            container = container.get('Id')
+        params = {'t': timeout}
+        url = self._url("/containers/{0}/stop".format(container))
+        res = self._post(url, params=params,
+                         timeout=max(timeout, self._timeout))
+        self._raise_for_status(res)
+
+    def tag(self, image, repository, tag=None, force=False):
+        params = {
+            'tag': tag,
+            'repo': repository,
+            'force': 1 if force else 0
+        }
+        url = self._url("/images/{0}/tag".format(image))
+        res = self._post(url, params=params)
+        self._raise_for_status(res)
+        return res.status_code == 201
+
+    def top(self, container):
+        u = self._url("/containers/{0}/top".format(container))
+        return self._result(self._get(u), True)
+
+    def version(self):
+        return self._result(self._get(self._url("/version")), True)
+
+    def wait(self, container):
+        if isinstance(container, dict):
+            container = container.get('Id')
+        url = self._url("/containers/{0}/wait".format(container))
+        res = self._post(url, timeout=None)
+        self._raise_for_status(res)
+        json_ = res.json()
+        if 'StatusCode' in json_:
+            return json_['StatusCode']
+        return -1
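
For context, a small sketch of how fig's service layer is expected to drive this client (the image, command and container name are placeholders; the calls map onto create_container(), start(), wait(), logs() and remove_container() above, and assume a running daemon):

    from fig.packages.docker import Client, APIError

    client = Client('unix://var/run/docker.sock')
    try:
        container = client.create_container('ubuntu', 'echo hello',
                                             name='fig_example_1')
        client.start(container)
        exit_code = client.wait(container)
        print('exited with %s' % exit_code)
        print(client.logs(container))
        client.remove_container(container)
    except APIError as e:
        print('Docker API error: %s' % e)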

+ 1 - 0
fig/packages/docker/unixconn/__init__.py

@@ -0,0 +1 @@
+from .unixconn import UnixAdapter  # flake8: noqa

+ 71 - 0
fig/packages/docker/unixconn/unixconn.py

@@ -0,0 +1,71 @@
+# Copyright 2013 dotCloud inc.
+
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+
+#        http://www.apache.org/licenses/LICENSE-2.0
+
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+import six
+
+if six.PY3:
+    import http.client as httplib
+else:
+    import httplib
+import requests.adapters
+import socket
+
+try:
+    import requests.packages.urllib3.connectionpool as connectionpool
+except ImportError:
+    import urllib3.connectionpool as connectionpool
+
+
+class UnixHTTPConnection(httplib.HTTPConnection, object):
+    def __init__(self, base_url, unix_socket, timeout=60):
+        httplib.HTTPConnection.__init__(self, 'localhost', timeout=timeout)
+        self.base_url = base_url
+        self.unix_socket = unix_socket
+        self.timeout = timeout
+
+    def connect(self):
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        sock.settimeout(self.timeout)
+        sock.connect(self.base_url.replace("unix:/", ""))
+        self.sock = sock
+
+    def _extract_path(self, url):
+        # remove the base_url entirely
+        return url.replace(self.base_url, "")
+
+    def request(self, method, url, **kwargs):
+        url = self._extract_path(self.unix_socket)
+        super(UnixHTTPConnection, self).request(method, url, **kwargs)
+
+
+class UnixHTTPConnectionPool(connectionpool.HTTPConnectionPool):
+    def __init__(self, base_url, socket_path, timeout=60):
+        connectionpool.HTTPConnectionPool.__init__(self, 'localhost',
+                                                   timeout=timeout)
+        self.base_url = base_url
+        self.socket_path = socket_path
+        self.timeout = timeout
+
+    def _new_conn(self):
+        return UnixHTTPConnection(self.base_url, self.socket_path,
+                                  self.timeout)
+
+
+class UnixAdapter(requests.adapters.HTTPAdapter):
+    def __init__(self, base_url, timeout=60):
+        self.base_url = base_url
+        self.timeout = timeout
+        super(UnixAdapter, self).__init__()
+
+    def get_connection(self, socket_path, proxies=None):
+        return UnixHTTPConnectionPool(self.base_url, socket_path, self.timeout)
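
The adapter hooks into requests' transport-adapter mechanism: Client (a requests.Session subclass) mounts it for the unix:// scheme, so ordinary session.get()/post() calls are tunnelled over the Docker socket. A standalone sketch of the same wiring, assuming the conventional socket path and a running daemon:

    import requests

    from fig.packages.docker.unixconn import UnixAdapter

    session = requests.Session()
    session.mount('unix://', UnixAdapter('unix://var/run/docker.sock'))
    # Everything after the base URL is forwarded to the daemon over the socket,
    # which is exactly how Client._url() builds its request URLs:
    resp = session.get('unix://var/run/docker.sock/v1.6/version')
    print(resp.status_code, resp.text)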

+ 3 - 0
fig/packages/docker/utils/__init__.py

@@ -0,0 +1,3 @@
+from .utils import (
+    compare_version, convert_port_bindings, mkbuildcontext, ping, tar
+) # flake8: noqa

+ 96 - 0
fig/packages/docker/utils/utils.py

@@ -0,0 +1,96 @@
+# Copyright 2013 dotCloud inc.
+
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+
+#        http://www.apache.org/licenses/LICENSE-2.0
+
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+import io
+import tarfile
+import tempfile
+
+import requests
+import six
+
+
+def mkbuildcontext(dockerfile):
+    f = tempfile.NamedTemporaryFile()
+    t = tarfile.open(mode='w', fileobj=f)
+    if isinstance(dockerfile, io.StringIO):
+        dfinfo = tarfile.TarInfo('Dockerfile')
+        if six.PY3:
+            raise TypeError('Please use io.BytesIO to create in-memory '
+                            'Dockerfiles with Python 3')
+        else:
+            dfinfo.size = len(dockerfile.getvalue())
+    elif isinstance(dockerfile, io.BytesIO):
+        dfinfo = tarfile.TarInfo('Dockerfile')
+        dfinfo.size = len(dockerfile.getvalue())
+    else:
+        dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
+    t.addfile(dfinfo, dockerfile)
+    t.close()
+    f.seek(0)
+    return f
+
+
+def tar(path):
+    f = tempfile.NamedTemporaryFile()
+    t = tarfile.open(mode='w', fileobj=f)
+    t.add(path, arcname='.')
+    t.close()
+    f.seek(0)
+    return f
+
+
+def compare_version(v1, v2):
+    return float(v2) - float(v1)
+
+
+def ping(url):
+    try:
+        res = requests.get(url)
+        return res.status_code < 400
+    except Exception:
+        return False
+
+
+def _convert_port_binding(binding):
+    result = {'HostIp': '', 'HostPort': ''}
+    if isinstance(binding, tuple):
+        if len(binding) == 2:
+            result['HostPort'] = binding[1]
+            result['HostIp'] = binding[0]
+        elif isinstance(binding[0], six.string_types):
+            result['HostIp'] = binding[0]
+        else:
+            result['HostPort'] = binding[0]
+    else:
+        result['HostPort'] = binding
+
+    if result['HostPort'] is None:
+        result['HostPort'] = ''
+    else:
+        result['HostPort'] = str(result['HostPort'])
+
+    return result
+
+
+def convert_port_bindings(port_bindings):
+    result = {}
+    for k, v in six.iteritems(port_bindings):
+        key = str(k)
+        if '/' not in key:
+            key = key + '/tcp'
+        if isinstance(v, list):
+            result[key] = [_convert_port_binding(binding) for binding in v]
+        else:
+            result[key] = [_convert_port_binding(v)]
+    return result
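
A short sketch of the structure convert_port_bindings() produces, which is the shape start() sends as PortBindings (the port numbers are illustrative):

    from fig.packages.docker.utils import convert_port_bindings

    bindings = convert_port_bindings({
        8000: 8000,                     # container port -> host port
        '53/udp': ('127.0.0.1', 5353),  # (host ip, host port) tuple
        443: None,                      # let the daemon choose a host port
    })
    # {'8000/tcp': [{'HostIp': '', 'HostPort': '8000'}],
    #  '53/udp':   [{'HostIp': '127.0.0.1', 'HostPort': '5353'}],
    #  '443/tcp':  [{'HostIp': '', 'HostPort': ''}]}
    print(bindings)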

+ 1 - 1
fig/service.py

@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from __future__ import absolute_import
-from docker.client import APIError
+from .packages.docker.client import APIError
 import logging
 import re
 import os

+ 2 - 1
requirements.txt

@@ -1,4 +1,5 @@
-docker-py==0.2.3
+requests==1.2.3
+websocket-client==0.11.0
 docopt==0.6.1
 PyYAML==3.10
 texttable==0.8.1

+ 1 - 1
tests/testcases.py

@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from __future__ import absolute_import
-from docker import Client
+from fig.packages.docker import Client
 from fig.service import Service
 from fig.cli.utils import docker_url
 from . import unittest