summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--system/docker/README16
-rw-r--r--system/docker/config/docker.default6
-rw-r--r--system/docker/config/docker.logrotate13
-rw-r--r--system/docker/config/rc.docker107
-rw-r--r--system/docker/docker-btrfs.patch32
-rw-r--r--system/docker/docker.SlackBuild102
-rw-r--r--system/docker/docker.info14
-rw-r--r--system/docker/doinst.sh4
-rw-r--r--system/docker/patches/0001-devicemapper-fix-zero-sized-field-access.patch88
-rw-r--r--system/docker/patches/0002-Avoid-buffering-to-tempfile-when-pushing-with-V2.patch220
-rw-r--r--system/docker/patches/0003-Refactoring-of-pullV2Tag.patch189
-rw-r--r--system/docker/patches/0004-deamon-events-use-UnixNano-and-no-goroutine.patch72
-rw-r--r--system/docker/patches/0005-vendor-update-tar-split-to-v0.9.10.patch289
-rw-r--r--system/docker/slack-desc4
14 files changed, 140 insertions, 1016 deletions
diff --git a/system/docker/README b/system/docker/README
index a2b961a519..7ffe7695f8 100644
--- a/system/docker/README
+++ b/system/docker/README
@@ -1,7 +1,8 @@
Docker is an open-source project to easily create lightweight, portable,
-self-sufficient containers from any application. The same container that
-a developer builds and tests on a laptop can run at scale, in production,
-on VMs, bare metal, OpenStack clusters, public clouds and more.
+self-sufficient containers from any application. The same container
+that a developer builds and tests on a laptop can run at scale, in
+production, on VMs, bare metal, OpenStack clusters, public clouds and
+more.
To use docker as a limited user, add your user to the 'docker' group:
@@ -23,10 +24,11 @@ and to /etc/rc.d/rc.local_shutdown (creating it if needed):
/etc/rc.d/rc.docker stop
fi
-If you are interested in enabling cgroup memory resource controll over swap as
-well, then append "swapaccount=1" to your kernel's parameters. This is often in
-/etc/lilo.conf, on the "append" variable.
+If you are interested in enabling cgroup memory resource control over
+swap as well, then append "swapaccount=1" to your kernel's parameters.
+This is often in /etc/lilo.conf, on the "append" variable.
-NOTE: google-go-lang is only needed at compile time - not needed for runtime.
+NOTE: google-go-lang is only needed at compile time - not needed for
+runtime.
Docker doesn't support x86, only x86_64
diff --git a/system/docker/config/docker.default b/system/docker/config/docker.default
index ae2989490d..f24ae34624 100644
--- a/system/docker/config/docker.default
+++ b/system/docker/config/docker.default
@@ -1,3 +1,3 @@
-## Set defaults used by the docker daemon
-## These are flags passed after `docker -d`
-#DOCKER_OPTS=
+## Set defaults used by the docker daemon.
+## These are flags passed after `dockerd`.
+#DOCKER_OPTS=""
diff --git a/system/docker/config/docker.logrotate b/system/docker/config/docker.logrotate
index 41f96a65d4..016980dde2 100644
--- a/system/docker/config/docker.logrotate
+++ b/system/docker/config/docker.logrotate
@@ -1,8 +1,9 @@
/var/log/docker.log {
- rotate 5
- notifempty
- missingok
- size=5M
- compress
- delaycompress
+ daily
+ rotate 7
+ copytruncate
+ delaycompress
+ compress
+ notifempty
+ missingok
}
diff --git a/system/docker/config/rc.docker b/system/docker/config/rc.docker
index 0199623116..90548ca4b0 100644
--- a/system/docker/config/rc.docker
+++ b/system/docker/config/rc.docker
@@ -1,81 +1,86 @@
#!/bin/sh
-
-# Short-Description: Create lightweight, portable, self-sufficient containers.
-# Description:
-# Docker is an open-source project to easily create lightweight, portable,
-# self-sufficient containers from any application. The same container that a
-# developer builds and tests on a laptop can run at scale, in production, on
-# VMs, bare metal, OpenStack clusters, public clouds and more.
-
+#
+# Docker startup script for Slackware Linux
+#
+# Docker is an open-source project to easily create lightweight, portable,
+# self-sufficient containers from any application.
PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
-BASE=docker
+BASE=dockerd
UNSHARE=/usr/bin/unshare
-DOCKER=/usr/bin/$BASE
-DOCKER_PIDFILE=/var/run/$BASE.pid
+DOCKER=/usr/bin/${BASE}
+DOCKER_PIDFILE=/var/run/${BASE}.pid
DOCKER_LOG=/var/log/docker.log
-DOCKER_OPTS=
+DOCKER_OPTS=""
-if [ -f /etc/default/$BASE ]; then
- . /etc/default/$BASE
+# Default options.
+if [ -f /etc/default/${BASE} ]; then
+ . /etc/default/${BASE}
fi
-# Check docker is present
-if [ ! -x $DOCKER ]; then
- echo "$DOCKER not present or not executable"
- exit 1
+# Check if docker is present.
+if [ ! -x ${DOCKER} ]; then
+ echo "${DOCKER} not present or not executable"
+ exit 1
fi
docker_start() {
- echo "starting $BASE ..."
- if [ -x ${DOCKER} ]; then
- # If there is an old PID file (no docker running), clean it up:
- if [ -r ${DOCKER_PIDFILE} ]; then
- if ! ps axc | grep docker 1> /dev/null 2> /dev/null ; then
- echo "Cleaning up old ${DOCKER_PIDFILE}."
- rm -f ${DOCKER_PIDFILE}
- fi
+ echo "Starting ${BASE} ..."
+ # If there is an old PID file (no dockerd running), clean it up.
+ if [ -r ${DOCKER_PIDFILE} ]; then
+ if ! ps axc | grep ${BASE} 1> /dev/null 2> /dev/null ; then
+ echo "Cleaning up old ${DOCKER_PIDFILE}."
+ rm -f ${DOCKER_PIDFILE}
fi
- nohup "${UNSHARE}" -m -- ${DOCKER} -d -p ${DOCKER_PIDFILE} ${DOCKER_OPTS} >> ${DOCKER_LOG} 2>&1 &
fi
+
+ nohup "${UNSHARE}" -m -- ${DOCKER} -p ${DOCKER_PIDFILE} ${DOCKER_OPTS} >> ${DOCKER_LOG} 2>&1 &
}
-# Stop docker:
docker_stop() {
- echo "stopping $BASE ..."
- # If there is no PID file, ignore this request...
+ echo -n "Stopping ${BASE} ..."
if [ -r ${DOCKER_PIDFILE} ]; then
- kill $(cat ${DOCKER_PIDFILE})
+ DOCKER_PID=$(cat ${DOCKER_PIDFILE})
+ kill ${DOCKER_PID}
+ while [ -d /proc/${DOCKER_PID} ]; do
+ sleep 1
+ echo -n "."
+ done
fi
+ echo " done"
}
-# Restart docker:
docker_restart() {
- docker_stop
- docker_start
+ docker_stop
+ sleep 1
+ docker_start
}
-case "$1" in
-'start')
- docker_start
- ;;
-'stop')
- docker_stop
- ;;
-'restart')
- docker_restart
- ;;
-'status')
- if [ -f ${DOCKER_PIDFILE} ] && ps -o cmd $(cat ${DOCKER_PIDFILE}) | grep -q $BASE ; then
- echo "status of $BASE: running"
+docker_status() {
+ if [ -f ${DOCKER_PIDFILE} ] && ps -o cmd $(cat ${DOCKER_PIDFILE}) | grep -q ${BASE} ; then
+ echo "Status of ${BASE}: running"
else
- echo "status of $BASE: stopped"
+ echo "Status of ${BASE}: stopped"
fi
- ;;
-*)
- echo "usage $0 start|stop|restart|status"
+}
+
+case "$1" in
+ 'start')
+ docker_start
+ ;;
+ 'stop')
+ docker_stop
+ ;;
+ 'restart')
+ docker_restart
+ ;;
+ 'status')
+ docker_status
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|status}"
esac
exit 0
diff --git a/system/docker/docker-btrfs.patch b/system/docker/docker-btrfs.patch
deleted file mode 100644
index 9d981a48a6..0000000000
--- a/system/docker/docker-btrfs.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-commit 6922f1be08111d889b0585b763b08f92d7a55e05
-Author: Tianon Gravi <admwiggin@gmail.com>
-Date: Sat Feb 1 21:40:51 2014 -0700
-
- Remove reference to <linux/btrfs.h>, and instead use <btrfs/ioctl.h> like we're supposed to (from btrfs-progs)
-
- This fixes compilation issues when btrfs.h isn't available (because we just need the relevant structs, which for userspace programs are supposed to come from btrfs-progs instead of the kernel headers).
-
- Docker-DCO-1.1-Signed-off-by: Andrew Page <admwiggin@gmail.com> (github: tianon)
-
-diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go
-index a50f11f..3d27909 100644
---- a/daemon/graphdriver/btrfs/btrfs.go
-+++ b/daemon/graphdriver/btrfs/btrfs.go
-@@ -4,15 +4,11 @@ package btrfs
-
- /*
- #include <stdlib.h>
--#include <sys/ioctl.h>
--#include <linux/fs.h>
--#include <errno.h>
--#include <sys/types.h>
- #include <dirent.h>
--#include <linux/btrfs.h>
--
-+#include <btrfs/ioctl.h>
- */
- import "C"
-+
- import (
- "fmt"
- "github.com/dotcloud/docker/daemon/graphdriver"
diff --git a/system/docker/docker.SlackBuild b/system/docker/docker.SlackBuild
index dfc00c85f3..098080c734 100644
--- a/system/docker/docker.SlackBuild
+++ b/system/docker/docker.SlackBuild
@@ -2,18 +2,38 @@
# Slackware build script for docker
-# Written by Vincent Batts <vbatts@hashbangbash.com>
+# Copyright 2014-2015 Vincent Batts <vbatts@hashbangbash.com>
+# Copyright 2017 Audrius Kažukauskas <audrius@neutrino.lt>
+# All rights reserved.
+#
+# Redistribution and use of this script, with or without modification, is
+# permitted provided that the following conditions are met:
+#
+# 1. Redistributions of this script must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
+# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
PRGNAM=docker
-VERSION=${VERSION:-1.8.2}
+VERSION=${VERSION:-17.03.0}
+GITHASH=${GITHASH:-60ccb22}
BUILD=${BUILD:-1}
TAG=${TAG:-_SBo}
-GITHASH=${GITHASH:-0a8c2e3}
+SRCVERSION=$VERSION-ce
if [ -z "$ARCH" ]; then
case "$( uname -m )" in
- i?86) ARCH=i486 ;;
+ i?86) ARCH=i586 ;;
arm*) ARCH=arm ;;
*) ARCH=$( uname -m ) ;;
esac
@@ -24,68 +44,50 @@ TMP=${TMP:-/tmp/SBo}
PKG=$TMP/package-$PRGNAM
OUTPUT=${OUTPUT:-/tmp}
-if [ "$ARCH" = "i486" ]; then
- SLKCFLAGS="-O2 -march=i486 -mtune=i686"
- LIBDIRSUFFIX=""
-elif [ "$ARCH" = "i686" ]; then
- SLKCFLAGS="-O2 -march=i686 -mtune=i686"
- LIBDIRSUFFIX=""
-elif [ "$ARCH" = "x86_64" ]; then
- SLKCFLAGS="-O2 -fPIC"
- LIBDIRSUFFIX="64"
-else
- SLKCFLAGS="-O2"
- LIBDIRSUFFIX=""
-fi
-
set -e
rm -rf $PKG
mkdir -p $TMP $PKG $OUTPUT
cd $TMP
-rm -rf $PRGNAM-$VERSION
-tar xvf $CWD/v${VERSION}.tar.gz || tar xvf $CWD/$PRGNAM-$VERSION.tar.gz
-cd $PRGNAM-$VERSION
+rm -rf $PRGNAM-$SRCVERSION
+tar xvf $CWD/$PRGNAM-$SRCVERSION.tar.gz
+cd $PRGNAM-$SRCVERSION
chown -R root:root .
-# the have symlinks in their testdata that goes outside the context of this build
-find . ! -type l \
+find -L . \
\( -perm 777 -o -perm 775 -o -perm 750 -o -perm 711 -o -perm 555 \
- -o -perm 511 \) -exec chmod 755 {} \; -o ! -type l \
+ -o -perm 511 \) -exec chmod 755 {} \; -o \
\( -perm 666 -o -perm 664 -o -perm 640 -o -perm 600 -o -perm 444 \
- -o -perm 440 -o -perm 400 \) -exec chmod 644 {} \;
+ -o -perm 440 -o -perm 400 \) -exec chmod 644 {} \;
-unset GOPATH
-
-# back out this commit, which causes btrfs headers to not be found on slackware
-# since btrfs-progs removes the <btrfs/ioctl.h> header.
-# https://github.com/docker/docker/commit/6922f1be08111d889b0585b763b08f92d7a55e05
-patch -p1 -R < $CWD/docker-btrfs.patch
+AUTO_GOPATH=1 \
+DOCKER_GITCOMMIT="$GITHASH" \
+DOCKER_BUILDTAGS="seccomp" \
+./hack/make.sh dynbinary
-# a couple of patches that missed the 1.8.2 release window, but are essential
-for patch in ${CWD}/patches/*.patch
-do
- patch -p1 < ${patch}
-done
+install -D -m 0755 bundles/$SRCVERSION/dynbinary-client/docker-$SRCVERSION $PKG/usr/bin/docker
+install -D -m 0755 bundles/$SRCVERSION/dynbinary-daemon/dockerd-$SRCVERSION $PKG/usr/bin/dockerd
-mkdir -p ${PKG}/usr/share/gocode/src/github.com/docker/docker
-cp -a . ${PKG}/usr/share/gocode/src/github.com/docker/docker/
+# Integrate required components.
+ln -s runc $PKG/usr/bin/docker-runc
+ln -s containerd $PKG/usr/bin/docker-containerd
+ln -s containerd-shim $PKG/usr/bin/docker-containerd-shim
+ln -s ctr $PKG/usr/bin/docker-containerd-ctr
+ln -s tini-static $PKG/usr/bin/docker-init
-AUTO_GOPATH=1 \
-DOCKER_GITCOMMIT="$GITHASH" \
- ./hack/make.sh dynbinary
+# Completion.
+install -D -m 0644 contrib/completion/bash/docker $PKG/usr/share/bash-completion/completions/docker
+install -D -m 0644 contrib/completion/zsh/_docker $PKG/usr/share/zsh/site-functions/_docker
+install -D -m 0644 contrib/completion/fish/docker.fish $PKG/usr/share/fish/vendor_completions.d/docker.fish
-# do not strip these binaries. they have a SHA1 baked into them.
-mkdir -p ${PKG}/usr/libexec/docker ${PKG}/usr/bin
-mv bundles/${VERSION}/dynbinary/dockerinit-${VERSION} ${PKG}/usr/libexec/docker/dockerinit
-mv bundles/${VERSION}/dynbinary/docker-${VERSION} ${PKG}/usr/bin/docker
+install -D -m 0644 $CWD/config/docker.default $PKG/etc/default/docker.new
+install -D -m 0644 $CWD/config/docker.logrotate $PKG/etc/logrotate.d/docker.new
+install -D -m 0755 $CWD/config/rc.docker $PKG/etc/rc.d/rc.docker.new
-install -D --mode 0644 $CWD/config/docker.default $PKG/etc/default/docker.new
-install -D --mode 0644 $CWD/config/docker.logrotate $PKG/etc/logrotate.d/docker.new
-install -D --mode 0755 $CWD/config/rc.docker $PKG/etc/rc.d/rc.docker.new
+# Install udev rules.
+install -D -m 0644 contrib/udev/80-docker.rules $PKG/lib/udev/rules.d/80-docker.rules
mkdir -p $PKG/usr/doc/$PRGNAM-$VERSION
-cp -a AUTHORS CONTRIBUTING.md CHANGELOG.md LICENSE README.md NOTICE VERSION \
- $PKG/usr/doc/$PRGNAM-$VERSION
+cp -a AUTHORS CHANGELOG.md LICENSE NOTICE README.md $PKG/usr/doc/$PRGNAM-$VERSION
cat $CWD/$PRGNAM.SlackBuild > $PKG/usr/doc/$PRGNAM-$VERSION/$PRGNAM.SlackBuild
mkdir -p $PKG/install
diff --git a/system/docker/docker.info b/system/docker/docker.info
index 688dc88d97..5cfda17b99 100644
--- a/system/docker/docker.info
+++ b/system/docker/docker.info
@@ -1,10 +1,10 @@
PRGNAM="docker"
-VERSION="1.8.2"
-HOMEPAGE="https://docker.io/"
+VERSION="17.03.0"
+HOMEPAGE="https://dockerproject.org/"
DOWNLOAD="UNSUPPORTED"
MD5SUM=""
-DOWNLOAD_x86_64="https://github.com/docker/docker/archive/v1.8.2.tar.gz"
-MD5SUM_x86_64="4faf25b356900f3e7599783ad4565e69"
-REQUIRES="google-go-lang"
-MAINTAINER="Vincent Batts"
-EMAIL="vbatts@hashbangbash.com"
+DOWNLOAD_x86_64="https://github.com/docker/docker/archive/v17.03.0-ce/docker-17.03.0-ce.tar.gz"
+MD5SUM_x86_64="ae104d1f25766d217bda5f23cf11c90b"
+REQUIRES="containerd docker-proxy tini"
+MAINTAINER="Audrius Kažukauskas"
+EMAIL="audrius@neutrino.lt"
diff --git a/system/docker/doinst.sh b/system/docker/doinst.sh
index 56e6b591a2..aec13bd8a9 100644
--- a/system/docker/doinst.sh
+++ b/system/docker/doinst.sh
@@ -26,3 +26,7 @@ preserve_perms etc/rc.d/rc.docker.new
config etc/default/docker.new
config etc/logrotate.d/docker.new
+if [ -x /sbin/udevadm ]; then
+ /sbin/udevadm control --reload-rules
+fi
+
diff --git a/system/docker/patches/0001-devicemapper-fix-zero-sized-field-access.patch b/system/docker/patches/0001-devicemapper-fix-zero-sized-field-access.patch
deleted file mode 100644
index 60d95db7c9..0000000000
--- a/system/docker/patches/0001-devicemapper-fix-zero-sized-field-access.patch
+++ /dev/null
@@ -1,88 +0,0 @@
-From f7236a195c84687edb74fec28b6c4cc98e34185c Mon Sep 17 00:00:00 2001
-From: Vincent Batts <vbatts@redhat.com>
-Date: Fri, 7 Aug 2015 10:18:20 -0400
-Subject: [PATCH 1/4] devicemapper: fix zero-sized field access
-
-Fixes: #15279
-
-Due to
-https://github.com/golang/go/commit/7904946eeb35faece61bbf6f5b3cc8be2f519c17
-the devices field is dropped.
-
-This solution works on go1.4 and go1.5
-
-Signed-off-by: Vincent Batts <vbatts@redhat.com>
----
- daemon/graphdriver/devmapper/deviceset.go | 14 +++++++++-----
- pkg/devicemapper/devmapper_wrapper.go | 18 +++++++++++++++---
- 2 files changed, 24 insertions(+), 8 deletions(-)
-
-diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
-index 2eee330..a80736a 100644
---- a/daemon/graphdriver/devmapper/deviceset.go
-+++ b/daemon/graphdriver/devmapper/deviceset.go
-@@ -1482,12 +1482,16 @@ func (devices *DeviceSet) deactivatePool() error {
- if err != nil {
- return err
- }
-- if d, err := devicemapper.GetDeps(devname); err == nil {
-- // Access to more Debug output
-- logrus.Debugf("[devmapper] devicemapper.GetDeps() %s: %#v", devname, d)
-+
-+ if devinfo.Exists == 0 {
-+ return nil
- }
-- if devinfo.Exists != 0 {
-- return devicemapper.RemoveDevice(devname)
-+ if err := devicemapper.RemoveDevice(devname); err != nil {
-+ return err
-+ }
-+
-+ if d, err := devicemapper.GetDeps(devname); err == nil {
-+ logrus.Warnf("[devmapper] device %s still has %d active dependents", devname, d.Count)
- }
-
- return nil
-diff --git a/pkg/devicemapper/devmapper_wrapper.go b/pkg/devicemapper/devmapper_wrapper.go
-index 87c2003..44ca772 100644
---- a/pkg/devicemapper/devmapper_wrapper.go
-+++ b/pkg/devicemapper/devmapper_wrapper.go
-@@ -38,7 +38,10 @@ static void log_with_errno_init()
- */
- import "C"
-
--import "unsafe"
-+import (
-+ "reflect"
-+ "unsafe"
-+)
-
- type (
- CDmTask C.struct_dm_task
-@@ -184,12 +187,21 @@ func dmTaskGetDepsFct(task *CDmTask) *Deps {
- if Cdeps == nil {
- return nil
- }
-+
-+ // golang issue: https://github.com/golang/go/issues/11925
-+ hdr := reflect.SliceHeader{
-+ Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))),
-+ Len: int(Cdeps.count),
-+ Cap: int(Cdeps.count),
-+ }
-+ devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr))
-+
- deps := &Deps{
- Count: uint32(Cdeps.count),
- Filler: uint32(Cdeps.filler),
- }
-- for _, device := range Cdeps.device {
-- deps.Device = append(deps.Device, (uint64)(device))
-+ for _, device := range devices {
-+ deps.Device = append(deps.Device, uint64(device))
- }
- return deps
- }
---
-2.4.3
-
diff --git a/system/docker/patches/0002-Avoid-buffering-to-tempfile-when-pushing-with-V2.patch b/system/docker/patches/0002-Avoid-buffering-to-tempfile-when-pushing-with-V2.patch
deleted file mode 100644
index f8d827fb82..0000000000
--- a/system/docker/patches/0002-Avoid-buffering-to-tempfile-when-pushing-with-V2.patch
+++ /dev/null
@@ -1,220 +0,0 @@
-From cccc745d93a59fdbb4dd7d7562ee8dd684a00786 Mon Sep 17 00:00:00 2001
-From: Stephen J Day <stephen.day@docker.com>
-Date: Tue, 11 Aug 2015 13:47:08 -0700
-Subject: [PATCH 2/4] Avoid buffering to tempfile when pushing with V2
-
-The practice of buffering to a tempfile during a pushing contributes massively
-to slow V2 push performance perception. The protocol was actually designed to
-avoid precalculation, supporting cut-through data push. This means we can
-assemble the layer, calculate its digest and push to the remote endpoint, all
-at the same time.
-
-This should increase performance massively on systems with slow disks or IO
-bottlenecks.
-
-Signed-off-by: Stephen J Day <stephen.day@docker.com>
----
- graph/graph.go | 21 --------------
- graph/push_v2.go | 51 ++++++++++++++-------------------
- integration-cli/docker_cli_push_test.go | 2 +-
- pkg/jsonmessage/jsonmessage.go | 6 ++++
- pkg/jsonmessage/jsonmessage_test.go | 4 +--
- 5 files changed, 31 insertions(+), 53 deletions(-)
-
-diff --git a/graph/graph.go b/graph/graph.go
-index be911b0..885de87 100644
---- a/graph/graph.go
-+++ b/graph/graph.go
-@@ -2,7 +2,6 @@ package graph
-
- import (
- "compress/gzip"
-- "crypto/sha256"
- "encoding/json"
- "errors"
- "fmt"
-@@ -329,26 +328,6 @@ func (graph *Graph) newTempFile() (*os.File, error) {
- return ioutil.TempFile(tmp, "")
- }
-
--func bufferToFile(f *os.File, src io.Reader) (int64, digest.Digest, error) {
-- var (
-- h = sha256.New()
-- w = gzip.NewWriter(io.MultiWriter(f, h))
-- )
-- _, err := io.Copy(w, src)
-- w.Close()
-- if err != nil {
-- return 0, "", err
-- }
-- n, err := f.Seek(0, os.SEEK_CUR)
-- if err != nil {
-- return 0, "", err
-- }
-- if _, err := f.Seek(0, 0); err != nil {
-- return 0, "", err
-- }
-- return n, digest.NewDigest("sha256", h), nil
--}
--
- // Delete atomically removes an image from the graph.
- func (graph *Graph) Delete(name string) error {
- id, err := graph.idIndex.Get(name)
-diff --git a/graph/push_v2.go b/graph/push_v2.go
-index 92d63ca..0ec8cfd 100644
---- a/graph/push_v2.go
-+++ b/graph/push_v2.go
-@@ -2,8 +2,8 @@ package graph
-
- import (
- "fmt"
-+ "io"
- "io/ioutil"
-- "os"
-
- "github.com/Sirupsen/logrus"
- "github.com/docker/distribution"
-@@ -199,7 +199,7 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
- func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) {
- out := p.config.OutStream
-
-- out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Buffering to Disk", nil))
-+ out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Preparing", nil))
-
- image, err := p.graph.Get(img.ID)
- if err != nil {
-@@ -209,52 +209,45 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
- if err != nil {
- return "", err
- }
--
-- tf, err := p.graph.newTempFile()
-- if err != nil {
-- return "", err
-- }
-- defer func() {
-- tf.Close()
-- os.Remove(tf.Name())
-- }()
--
-- size, dgst, err := bufferToFile(tf, arch)
-- if err != nil {
-- return "", err
-- }
-+ defer arch.Close()
-
- // Send the layer
-- logrus.Debugf("rendered layer for %s of [%d] size", img.ID, size)
- layerUpload, err := bs.Create(context.Background())
- if err != nil {
- return "", err
- }
- defer layerUpload.Close()
-
-+ digester := digest.Canonical.New()
-+ tee := io.TeeReader(arch, digester.Hash())
-+
- reader := progressreader.New(progressreader.Config{
-- In: ioutil.NopCloser(tf),
-+ In: ioutil.NopCloser(tee), // we'll take care of close here.
- Out: out,
- Formatter: p.sf,
-- Size: int(size),
-- NewLines: false,
-- ID: stringid.TruncateID(img.ID),
-- Action: "Pushing",
-+ // TODO(stevvooe): This may cause a size reporting error. Try to get
-+ // this from tar-split or elsewhere. The main issue here is that we
-+ // don't want to buffer to disk *just* to calculate the size.
-+ Size: int(img.Size),
-+
-+ NewLines: false,
-+ ID: stringid.TruncateID(img.ID),
-+ Action: "Pushing",
- })
-- n, err := layerUpload.ReadFrom(reader)
-+
-+ out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil))
-+ nn, err := io.Copy(layerUpload, reader)
- if err != nil {
- return "", err
- }
-- if n != size {
-- return "", fmt.Errorf("short upload: only wrote %d of %d", n, size)
-- }
-
-- desc := distribution.Descriptor{Digest: dgst}
-- if _, err := layerUpload.Commit(context.Background(), desc); err != nil {
-+ dgst := digester.Digest()
-+ if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
- return "", err
- }
-
-- out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Image successfully pushed", nil))
-+ logrus.Debugf("uploaded layer %s (%s), %d bytes", img.ID, dgst, nn)
-+ out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushed", nil))
-
- return dgst, nil
- }
-diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go
-index 111e9f3..c17a0ea 100644
---- a/integration-cli/docker_cli_push_test.go
-+++ b/integration-cli/docker_cli_push_test.go
-@@ -108,7 +108,7 @@ func (s *DockerRegistrySuite) TestPushInterrupt(c *check.C) {
- }
-
- // Interrupt push (yes, we have no idea at what point it will get killed).
-- time.Sleep(200 * time.Millisecond)
-+ time.Sleep(50 * time.Millisecond) // dependent on race condition.
- if err := pushCmd.Process.Kill(); err != nil {
- c.Fatalf("Failed to kill push process: %v", err)
- }
-diff --git a/pkg/jsonmessage/jsonmessage.go b/pkg/jsonmessage/jsonmessage.go
-index 7db1626..c4b311e 100644
---- a/pkg/jsonmessage/jsonmessage.go
-+++ b/pkg/jsonmessage/jsonmessage.go
-@@ -61,8 +61,14 @@ func (p *JSONProgress) String() string {
- }
- pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
- }
-+
- numbersBox = fmt.Sprintf("%8v/%v", current, total)
-
-+ if p.Current > p.Total {
-+ // remove total display if the reported current is wonky.
-+ numbersBox = fmt.Sprintf("%8v", current)
-+ }
-+
- if p.Current > 0 && p.Start > 0 && percentage < 50 {
- fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0))
- perEntry := fromStart / time.Duration(p.Current)
-diff --git a/pkg/jsonmessage/jsonmessage_test.go b/pkg/jsonmessage/jsonmessage_test.go
-index 2e78fa7..889b0ba 100644
---- a/pkg/jsonmessage/jsonmessage_test.go
-+++ b/pkg/jsonmessage/jsonmessage_test.go
-@@ -3,12 +3,12 @@ package jsonmessage
- import (
- "bytes"
- "fmt"
-+ "strings"
- "testing"
- "time"
-
- "github.com/docker/docker/pkg/term"
- "github.com/docker/docker/pkg/timeutils"
-- "strings"
- )
-
- func TestError(t *testing.T) {
-@@ -45,7 +45,7 @@ func TestProgress(t *testing.T) {
- }
-
- // this number can't be negative gh#7136
-- expected = "[==================================================>] 50 B/40 B"
-+ expected = "[==================================================>] 50 B"
- jp5 := JSONProgress{Current: 50, Total: 40}
- if jp5.String() != expected {
- t.Fatalf("Expected %q, got %q", expected, jp5.String())
---
-2.4.3
-
diff --git a/system/docker/patches/0003-Refactoring-of-pullV2Tag.patch b/system/docker/patches/0003-Refactoring-of-pullV2Tag.patch
deleted file mode 100644
index 97f44bc78f..0000000000
--- a/system/docker/patches/0003-Refactoring-of-pullV2Tag.patch
+++ /dev/null
@@ -1,189 +0,0 @@
-From e12038fb4a2f74314cf23860ea97528418832ba5 Mon Sep 17 00:00:00 2001
-From: Alexander Morozov <lk4d4@docker.com>
-Date: Wed, 12 Aug 2015 20:23:56 -0700
-Subject: [PATCH 3/4] Refactoring of pullV2Tag
-
-* use downloadInfo pointers everywhere
-* use downloads slice only for things that we really download
-* cleanup tmp files in all cases
-
-Signed-off-by: Alexander Morozov <lk4d4@docker.com>
----
- graph/pull_v2.go | 106 ++++++++++++++++++++++++++++++-------------------------
- 1 file changed, 58 insertions(+), 48 deletions(-)
-
-diff --git a/graph/pull_v2.go b/graph/pull_v2.go
-index 1dbb9fe..ba5e8ce 100644
---- a/graph/pull_v2.go
-+++ b/graph/pull_v2.go
-@@ -139,6 +139,7 @@ func (p *v2Puller) download(di *downloadInfo) {
- di.err <- err
- return
- }
-+ di.tmpFile = tmpFile
-
- blobs := p.repo.Blobs(context.Background())
-
-@@ -187,7 +188,6 @@ func (p *v2Puller) download(di *downloadInfo) {
- out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))
-
- logrus.Debugf("Downloaded %s to tempfile %s", di.img.ID, tmpFile.Name())
-- di.tmpFile = tmpFile
- di.layer = layerDownload
-
- di.err <- nil
-@@ -243,9 +243,9 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (verified bool, err error)
-
- out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))
-
-- downloads := make([]downloadInfo, len(manifest.FSLayers))
-+ var downloads []*downloadInfo
-
-- layerIDs := []string{}
-+ var layerIDs []string
- defer func() {
- p.graph.Release(p.sessionID, layerIDs...)
- }()
-@@ -256,66 +256,75 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (verified bool, err error)
- logrus.Debugf("error getting image v1 json: %v", err)
- return false, err
- }
-- downloads[i].img = img
-- downloads[i].digest = manifest.FSLayers[i].BlobSum
--
- p.graph.Retain(p.sessionID, img.ID)
- layerIDs = append(layerIDs, img.ID)
-
- // Check if exists
- if p.graph.Exists(img.ID) {
- logrus.Debugf("Image already exists: %s", img.ID)
-+ out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Already exists", nil))
- continue
- }
--
- out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))
-
-- downloads[i].err = make(chan error)
-- downloads[i].out = pipeWriter
-- go p.download(&downloads[i])
-+ d := &downloadInfo{
-+ img: img,
-+ digest: manifest.FSLayers[i].BlobSum,
-+ // TODO: seems like this chan buffer solved hanging problem in go1.5,
-+ // this can indicate some deeper problem that somehow we never take
-+ // error from channel in loop below
-+ err: make(chan error, 1),
-+ out: pipeWriter,
-+ }
-+ downloads = append(downloads, d)
-+
-+ go p.download(d)
- }
-
-- var tagUpdated bool
-- for i := len(downloads) - 1; i >= 0; i-- {
-- d := &downloads[i]
-- if d.err != nil {
-- if err := <-d.err; err != nil {
-- return false, err
-- }
-- }
-- if d.layer != nil {
-- // if tmpFile is empty assume download and extracted elsewhere
-- defer os.Remove(d.tmpFile.Name())
-- defer d.tmpFile.Close()
-- d.tmpFile.Seek(0, 0)
-+ // run clean for all downloads to prevent leftovers
-+ for _, d := range downloads {
-+ defer func(d *downloadInfo) {
- if d.tmpFile != nil {
--
-- reader := progressreader.New(progressreader.Config{
-- In: d.tmpFile,
-- Out: out,
-- Formatter: p.sf,
-- Size: int(d.size),
-- NewLines: false,
-- ID: stringid.TruncateID(d.img.ID),
-- Action: "Extracting",
-- })
--
-- err = p.graph.Register(d.img, reader)
-- if err != nil {
-- return false, err
-+ d.tmpFile.Close()
-+ if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
-+ logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
- }
-+ }
-+ }(d)
-+ }
-
-- if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
-- return false, err
-- }
-+ var tagUpdated bool
-+ for _, d := range downloads {
-+ if err := <-d.err; err != nil {
-+ return false, err
-+ }
-+ if d.layer == nil {
-+ continue
-+ }
-+ // if tmpFile is empty assume download and extracted elsewhere
-+ d.tmpFile.Seek(0, 0)
-+ reader := progressreader.New(progressreader.Config{
-+ In: d.tmpFile,
-+ Out: out,
-+ Formatter: p.sf,
-+ Size: int(d.size),
-+ NewLines: false,
-+ ID: stringid.TruncateID(d.img.ID),
-+ Action: "Extracting",
-+ })
-+
-+ err = p.graph.Register(d.img, reader)
-+ if err != nil {
-+ return false, err
-+ }
-
-- // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
-- }
-- out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
-- tagUpdated = true
-- } else {
-- out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
-+ if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
-+ return false, err
- }
-+
-+ // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
-+ out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
-+ tagUpdated = true
- }
-
- manifestDigest, _, err := digestFromManifest(manifest, p.repoInfo.LocalName)
-@@ -342,17 +351,18 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (verified bool, err error)
- out.Write(p.sf.FormatStatus(p.repo.Name()+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
- }
-
-+ firstID := layerIDs[len(layerIDs)-1]
- if utils.DigestReference(tag) {
- // TODO(stevvooe): Ideally, we should always set the digest so we can
- // use the digest whether we pull by it or not. Unfortunately, the tag
- // store treats the digest as a separate tag, meaning there may be an
- // untagged digest image that would seem to be dangling by a user.
-- if err = p.SetDigest(p.repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
-+ if err = p.SetDigest(p.repoInfo.LocalName, tag, firstID); err != nil {
- return false, err
- }
- } else {
- // only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
-- if err = p.Tag(p.repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
-+ if err = p.Tag(p.repoInfo.LocalName, tag, firstID, true); err != nil {
- return false, err
- }
- }
---
-2.4.3
-
diff --git a/system/docker/patches/0004-deamon-events-use-UnixNano-and-no-goroutine.patch b/system/docker/patches/0004-deamon-events-use-UnixNano-and-no-goroutine.patch
deleted file mode 100644
index 9b9836842a..0000000000
--- a/system/docker/patches/0004-deamon-events-use-UnixNano-and-no-goroutine.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-From 67c185dea42b6d4dc8f53280446488621ab04f7c Mon Sep 17 00:00:00 2001
-From: Vincent Batts <vbatts@redhat.com>
-Date: Tue, 15 Sep 2015 15:05:17 -0400
-Subject: [PATCH 4/4] deamon/events: use UnixNano and no goroutine
-
-Signed-off-by: Vincent Batts <vbatts@redhat.com>
----
- daemon/events/events.go | 24 +++++++++++-------------
- pkg/jsonmessage/jsonmessage.go | 5 +++--
- 2 files changed, 14 insertions(+), 15 deletions(-)
-
-diff --git a/daemon/events/events.go b/daemon/events/events.go
-index 07ee29a..aeb22e8 100644
---- a/daemon/events/events.go
-+++ b/daemon/events/events.go
-@@ -45,19 +45,17 @@ func (e *Events) Evict(l chan interface{}) {
- // Log broadcasts event to listeners. Each listener has 100 millisecond for
- // receiving event or it will be skipped.
- func (e *Events) Log(action, id, from string) {
-- go func() {
-- e.mu.Lock()
-- jm := &jsonmessage.JSONMessage{Status: action, ID: id, From: from, Time: time.Now().UTC().Unix()}
-- if len(e.events) == cap(e.events) {
-- // discard oldest event
-- copy(e.events, e.events[1:])
-- e.events[len(e.events)-1] = jm
-- } else {
-- e.events = append(e.events, jm)
-- }
-- e.mu.Unlock()
-- e.pub.Publish(jm)
-- }()
-+ e.mu.Lock()
-+ jm := &jsonmessage.JSONMessage{Status: action, ID: id, From: from, TimeNano: time.Now().UTC().UnixNano()}
-+ if len(e.events) == cap(e.events) {
-+ // discard oldest event
-+ copy(e.events, e.events[1:])
-+ e.events[len(e.events)-1] = jm
-+ } else {
-+ e.events = append(e.events, jm)
-+ }
-+ e.mu.Unlock()
-+ e.pub.Publish(jm)
- }
-
- // SubscribersCount returns number of event listeners
-diff --git a/pkg/jsonmessage/jsonmessage.go b/pkg/jsonmessage/jsonmessage.go
-index c4b311e..8a24d9c 100644
---- a/pkg/jsonmessage/jsonmessage.go
-+++ b/pkg/jsonmessage/jsonmessage.go
-@@ -90,6 +90,7 @@ type JSONMessage struct {
- ID string `json:"id,omitempty"`
- From string `json:"from,omitempty"`
- Time int64 `json:"time,omitempty"`
-+ TimeNano int64 `json:"timeNano,omitempty"`
- Error *JSONError `json:"errorDetail,omitempty"`
- ErrorMessage string `json:"error,omitempty"` //deprecated
- }
-@@ -109,8 +110,8 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
- } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
- return nil
- }
-- if jm.Time != 0 {
-- fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed))
-+ if jm.Time != 0 || jm.TimeNano != 0 {
-+ fmt.Fprintf(out, "%s ", time.Unix(jm.Time, jm.TimeNano).Format(timeutils.RFC3339NanoFixed))
- }
- if jm.ID != "" {
- fmt.Fprintf(out, "%s: ", jm.ID)
---
-2.4.3
-
diff --git a/system/docker/patches/0005-vendor-update-tar-split-to-v0.9.10.patch b/system/docker/patches/0005-vendor-update-tar-split-to-v0.9.10.patch
deleted file mode 100644
index f343afae57..0000000000
--- a/system/docker/patches/0005-vendor-update-tar-split-to-v0.9.10.patch
+++ /dev/null
@@ -1,289 +0,0 @@
-From d8029ceb202fda8160855c07081dc51aae1ec1ad Mon Sep 17 00:00:00 2001
-From: Vincent Batts <vbatts@redhat.com>
-Date: Wed, 23 Sep 2015 15:50:23 -0400
-Subject: [PATCH 5/5] vendor: update tar-split to v0.9.10
-
-This addresses handling of non-utf8 file names, namely iso-8859-1.
-
-https://github.com/docker/docker/issues/16516
-
-Reported-by: @kwk
-Signed-off-by: Vincent Batts <vbatts@redhat.com>
----
- hack/vendor.sh | 2 +-
- .../vbatts/tar-split/archive/tar/common.go | 28 ++++++++++++++--
- .../vbatts/tar-split/archive/tar/reader.go | 15 ++++++++-
- .../vbatts/tar-split/archive/tar/writer.go | 2 +-
- .../vbatts/tar-split/tar/asm/assemble.go | 4 +--
- .../vbatts/tar-split/tar/asm/disassemble.go | 11 +++---
- .../vbatts/tar-split/tar/storage/entry.go | 39 ++++++++++++++++++++++
- .../vbatts/tar-split/tar/storage/packer.go | 13 ++++++--
- 8 files changed, 101 insertions(+), 13 deletions(-)
-
-diff --git a/hack/vendor.sh b/hack/vendor.sh
-index 68772ef..52ba6ef 100755
---- a/hack/vendor.sh
-+++ b/hack/vendor.sh
-@@ -36,7 +36,7 @@ clone git github.com/hashicorp/consul v0.5.2
-
- # get graph and distribution packages
- clone git github.com/docker/distribution ec87e9b6971d831f0eff752ddb54fb64693e51cd # docker/1.8 branch
--clone git github.com/vbatts/tar-split v0.9.6
-+clone git github.com/vbatts/tar-split v0.9.10
-
- clone git github.com/docker/notary 8e8122eb5528f621afcd4e2854c47302f17392f7
- clone git github.com/endophage/gotuf a592b03b28b02bb29bb5878308fb1abed63383b5
-diff --git a/vendor/src/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/src/github.com/vbatts/tar-split/archive/tar/common.go
-index e363aa7..c31df06 100644
---- a/vendor/src/github.com/vbatts/tar-split/archive/tar/common.go
-+++ b/vendor/src/github.com/vbatts/tar-split/archive/tar/common.go
-@@ -139,8 +139,8 @@ func (fi headerFileInfo) Mode() (mode os.FileMode) {
- }
-
- switch fi.h.Typeflag {
-- case TypeLink, TypeSymlink:
-- // hard link, symbolic link
-+ case TypeSymlink:
-+ // symbolic link
- mode |= os.ModeSymlink
- case TypeChar:
- // character device node
-@@ -249,6 +249,30 @@ func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
- if fm&os.ModeSticky != 0 {
- h.Mode |= c_ISVTX
- }
-+ // If possible, populate additional fields from OS-specific
-+ // FileInfo fields.
-+ if sys, ok := fi.Sys().(*Header); ok {
-+ // This FileInfo came from a Header (not the OS). Use the
-+ // original Header to populate all remaining fields.
-+ h.Uid = sys.Uid
-+ h.Gid = sys.Gid
-+ h.Uname = sys.Uname
-+ h.Gname = sys.Gname
-+ h.AccessTime = sys.AccessTime
-+ h.ChangeTime = sys.ChangeTime
-+ if sys.Xattrs != nil {
-+ h.Xattrs = make(map[string]string)
-+ for k, v := range sys.Xattrs {
-+ h.Xattrs[k] = v
-+ }
-+ }
-+ if sys.Typeflag == TypeLink {
-+ // hard link
-+ h.Typeflag = TypeLink
-+ h.Size = 0
-+ h.Linkname = sys.Linkname
-+ }
-+ }
- if sysStat != nil {
- return h, sysStat(fi, h)
- }
-diff --git a/vendor/src/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/src/github.com/vbatts/tar-split/archive/tar/reader.go
-index c72e002..4168ea2 100644
---- a/vendor/src/github.com/vbatts/tar-split/archive/tar/reader.go
-+++ b/vendor/src/github.com/vbatts/tar-split/archive/tar/reader.go
-@@ -138,7 +138,13 @@ func (tr *Reader) Next() (*Header, error) {
- // We actually read the whole file,
- // but this skips alignment padding
- tr.skipUnread()
-+ if tr.err != nil {
-+ return nil, tr.err
-+ }
- hdr = tr.readHeader()
-+ if hdr == nil {
-+ return nil, tr.err
-+ }
- mergePAX(hdr, headers)
-
- // Check for a PAX format sparse file
-@@ -397,7 +403,7 @@ func parsePAX(r io.Reader) (map[string]string, error) {
- }
- // Parse the first token as a decimal integer.
- n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
-- if err != nil {
-+ if err != nil || n < 5 || int64(len(buf)) < n {
- return nil, ErrHeader
- }
- // Extract everything between the decimal and the n -1 on the
-@@ -553,6 +559,10 @@ func (tr *Reader) readHeader() *Header {
- hdr.Uid = int(tr.octal(s.next(8)))
- hdr.Gid = int(tr.octal(s.next(8)))
- hdr.Size = tr.octal(s.next(12))
-+ if hdr.Size < 0 {
-+ tr.err = ErrHeader
-+ return nil
-+ }
- hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
- s.next(8) // chksum
- hdr.Typeflag = s.next(1)[0]
-@@ -895,6 +905,9 @@ func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
- // Otherwise, we're at the end of the file
- return 0, io.EOF
- }
-+ if sfr.tot < sfr.sp[0].offset {
-+ return 0, io.ErrUnexpectedEOF
-+ }
- if sfr.pos < sfr.sp[0].offset {
- // We're in a hole
- n = sfr.readHole(b, sfr.sp[0].offset)
-diff --git a/vendor/src/github.com/vbatts/tar-split/archive/tar/writer.go b/vendor/src/github.com/vbatts/tar-split/archive/tar/writer.go
-index dafb2ca..9dbc01a 100644
---- a/vendor/src/github.com/vbatts/tar-split/archive/tar/writer.go
-+++ b/vendor/src/github.com/vbatts/tar-split/archive/tar/writer.go
-@@ -355,7 +355,7 @@ func paxHeader(msg string) string {
- // hdr.Size bytes are written after WriteHeader.
- func (tw *Writer) Write(b []byte) (n int, err error) {
- if tw.closed {
-- err = ErrWriteTooLong
-+ err = ErrWriteAfterClose
- return
- }
- overwrite := false
-diff --git a/vendor/src/github.com/vbatts/tar-split/tar/asm/assemble.go b/vendor/src/github.com/vbatts/tar-split/tar/asm/assemble.go
-index 74317cb..83d6426 100644
---- a/vendor/src/github.com/vbatts/tar-split/tar/asm/assemble.go
-+++ b/vendor/src/github.com/vbatts/tar-split/tar/asm/assemble.go
-@@ -39,7 +39,7 @@ func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadClose
- if entry.Size == 0 {
- continue
- }
-- fh, err := fg.Get(entry.Name)
-+ fh, err := fg.Get(entry.GetName())
- if err != nil {
- pw.CloseWithError(err)
- return
-@@ -56,7 +56,7 @@ func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadClose
- // but since it's coming through the PipeReader, the context of
- // _which_ file would be lost...
- fh.Close()
-- pw.CloseWithError(fmt.Errorf("file integrity checksum failed for %q", entry.Name))
-+ pw.CloseWithError(fmt.Errorf("file integrity checksum failed for %q", entry.GetName()))
- return
- }
- fh.Close()
-diff --git a/vendor/src/github.com/vbatts/tar-split/tar/asm/disassemble.go b/vendor/src/github.com/vbatts/tar-split/tar/asm/disassemble.go
-index 7986890..54ef23a 100644
---- a/vendor/src/github.com/vbatts/tar-split/tar/asm/disassemble.go
-+++ b/vendor/src/github.com/vbatts/tar-split/tar/asm/disassemble.go
-@@ -92,13 +92,16 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
- }
- }
-
-- // File entries added, regardless of size
-- _, err = p.AddEntry(storage.Entry{
-+ entry := storage.Entry{
- Type: storage.FileType,
-- Name: hdr.Name,
- Size: hdr.Size,
- Payload: csum,
-- })
-+ }
-+ // For proper marshalling of non-utf8 characters
-+ entry.SetName(hdr.Name)
-+
-+ // File entries added, regardless of size
-+ _, err = p.AddEntry(entry)
- if err != nil {
- pW.CloseWithError(err)
- return
-diff --git a/vendor/src/github.com/vbatts/tar-split/tar/storage/entry.go b/vendor/src/github.com/vbatts/tar-split/tar/storage/entry.go
-index 38fe7ba..c91e7ea 100644
---- a/vendor/src/github.com/vbatts/tar-split/tar/storage/entry.go
-+++ b/vendor/src/github.com/vbatts/tar-split/tar/storage/entry.go
-@@ -1,5 +1,7 @@
- package storage
-
-+import "unicode/utf8"
-+
- // Entries is for sorting by Position
- type Entries []Entry
-
-@@ -33,7 +35,44 @@ const (
- type Entry struct {
- Type Type `json:"type"`
- Name string `json:"name,omitempty"`
-+ NameRaw []byte `json:"name_raw,omitempty"`
- Size int64 `json:"size,omitempty"`
- Payload []byte `json:"payload"` // SegmentType stores payload here; FileType stores crc64 checksum here;
- Position int `json:"position"`
- }
-+
-+// SetName will check name for valid UTF-8 string, and set the appropriate
-+// field. See https://github.com/vbatts/tar-split/issues/17
-+func (e *Entry) SetName(name string) {
-+ if utf8.ValidString(name) {
-+ e.Name = name
-+ } else {
-+ e.NameRaw = []byte(name)
-+ }
-+}
-+
-+// SetNameBytes will check name for valid UTF-8 string, and set the appropriate
-+// field
-+func (e *Entry) SetNameBytes(name []byte) {
-+ if utf8.Valid(name) {
-+ e.Name = string(name)
-+ } else {
-+ e.NameRaw = name
-+ }
-+}
-+
-+// GetName returns the string for the entry's name, regardless of the field stored in
-+func (e *Entry) GetName() string {
-+ if len(e.NameRaw) > 0 {
-+ return string(e.NameRaw)
-+ }
-+ return e.Name
-+}
-+
-+// GetNameBytes returns the bytes for the entry's name, regardless of the field stored in
-+func (e *Entry) GetNameBytes() []byte {
-+ if len(e.NameRaw) > 0 {
-+ return e.NameRaw
-+ }
-+ return []byte(e.Name)
-+}
-diff --git a/vendor/src/github.com/vbatts/tar-split/tar/storage/packer.go b/vendor/src/github.com/vbatts/tar-split/tar/storage/packer.go
-index a02a19a..0c9d99b 100644
---- a/vendor/src/github.com/vbatts/tar-split/tar/storage/packer.go
-+++ b/vendor/src/github.com/vbatts/tar-split/tar/storage/packer.go
-@@ -6,6 +6,7 @@ import (
- "errors"
- "io"
- "path/filepath"
-+ "unicode/utf8"
- )
-
- // ErrDuplicatePath occurs when a tar archive has more than one entry for the
-@@ -61,7 +62,7 @@ func (jup *jsonUnpacker) Next() (*Entry, error) {
-
- // check for dup name
- if e.Type == FileType {
-- cName := filepath.Clean(e.Name)
-+ cName := filepath.Clean(e.GetName())
- if _, ok := jup.seen[cName]; ok {
- return nil, ErrDuplicatePath
- }
-@@ -93,9 +94,17 @@ type jsonPacker struct {
- type seenNames map[string]struct{}
-
- func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
-+ // if Name is not valid utf8, switch it to raw first.
-+ if e.Name != "" {
-+ if !utf8.ValidString(e.Name) {
-+ e.NameRaw = []byte(e.Name)
-+ e.Name = ""
-+ }
-+ }
-+
- // check early for dup name
- if e.Type == FileType {
-- cName := filepath.Clean(e.Name)
-+ cName := filepath.Clean(e.GetName())
- if _, ok := jp.seen[cName]; ok {
- return -1, ErrDuplicatePath
- }
---
-2.4.3
-
diff --git a/system/docker/slack-desc b/system/docker/slack-desc
index e8f6ef52c1..6c22bf46a4 100644
--- a/system/docker/slack-desc
+++ b/system/docker/slack-desc
@@ -9,11 +9,11 @@
docker: docker (manager for applications in linux containers)
docker:
docker: Docker is an open-source project to easily create lightweight,
-docker: portable, self-sufficient containers from any application. The same
+docker: portable, self-sufficient containers from any application. The same
docker: container that a developer builds and tests on a laptop can run at
docker: scale, in production, on VMs, bare metal, OpenStack clusters, public
docker: clouds and more.
docker:
-docker: Homepage: https://docker.io/
+docker: Homepage: https://dockerproject.org/
docker:
docker: