Merge pull request #147 from tiborvass/import-man-docs-completion

Import man docs completion
master
Tibor Vass 2017-06-02 12:36:15 -07:00 committed by GitHub
commit d95fd2f38c
279 changed files with 44396 additions and 0 deletions


@ -0,0 +1,2 @@
Tianon Gravi <admwiggin@gmail.com> (@tianon)
Jessie Frazelle <jess@docker.com> (@jfrazelle)

File diff suppressed because it is too large


@ -0,0 +1,409 @@
# docker.fish - docker completions for fish shell
#
# This file is generated by gen_docker_fish_completions.py from:
# https://github.com/barnybug/docker-fish-completion
#
# To install the completions:
# mkdir -p ~/.config/fish/completions
# cp docker.fish ~/.config/fish/completions
#
# Completion supported:
# - parameters
# - commands
# - containers
# - images
# - repositories
function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
for i in (commandline -opc)
if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats
return 1
end
end
return 0
end
function __fish_print_docker_containers --description 'Print a list of docker containers' -a select
switch $select
case running
docker ps -a --no-trunc --filter status=running --format "{{.ID}}\n{{.Names}}" | tr ',' '\n'
case stopped
docker ps -a --no-trunc --filter status=exited --format "{{.ID}}\n{{.Names}}" | tr ',' '\n'
case all
docker ps -a --no-trunc --format "{{.ID}}\n{{.Names}}" | tr ',' '\n'
end
end
function __fish_print_docker_images --description 'Print a list of docker images'
docker images --format "{{.Repository}}:{{.Tag}}" | command grep -v '<none>'
end
function __fish_print_docker_repositories --description 'Print a list of docker repositories'
docker images --format "{{.Repository}}" | command grep -v '<none>' | command sort | command uniq
end
# common options
complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the Engine API. Default is cors disabled"
complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge'
complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b"
complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode'
complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers'
complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-opt -d 'Force Docker to use specific DNS options'
complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains'
complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set runtime execution options'
complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)'
complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)'
complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode'
complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime'
complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.'
complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage'
complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication'
complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)'
complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports'
complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.'
complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range"
complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules"
complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking'
complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level ("debug", "info", "warn", "error", "fatal")'
complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)'
complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU'
complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file'
complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror'
complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver'
complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. SELinux does not presently support the BTRFS storage driver'
complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options'
complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify'
complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here'
complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file'
complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file'
complete -c docker -f -n '__fish_docker_no_subcommand' -l tlsverify -d 'Use TLS and verify the remote (daemon: verify client, client: verify daemon)'
complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit'
# subcommands
# attach
complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container'
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN'
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.'
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container"
# build
complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile (default is 'Dockerfile' at context root)"
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the build output and print image ID on success'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success'
# commit
complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes"
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container"
# cp
complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders between a container and the local filesystem"
complete -c docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage'
# create
complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpu-shares -d 'CPU shares (relative weight)'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d 'Add Linux capabilities'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device-cgroup-rule -d 'Add a rule to the cgroup allowed devices list (e.g. --device-cgroup-rule="c 13:37 rwm")'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)"
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)"
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of <name|id>:alias'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: <number>[<unit>], where unit = b, k, m or g)'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g., 92:d0:c6:0a:29:33)'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: <number>[<unit>], where unit = b, k, m or g)"
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mount -d 'Attach a filesystem mount to the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host"
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only"
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working directory inside the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image"
# diff
complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem"
complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container"
# events
complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server'
complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')"
complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp'
complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp'
complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l format -d 'Format the output using the given go template'
# exec
complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container'
complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background'
complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached'
complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY'
complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container"
# export
complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive'
complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container"
# history
complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image'
complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output"
complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs'
complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image"
# images
complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images'
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)'
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')"
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output"
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs'
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository"
# import
complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball'
complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage'
# info
complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information'
complete -c docker -A -f -n '__fish_seen_subcommand_from info' -s f -l format -d 'Format the output using the given go template'
complete -c docker -A -f -n '__fish_seen_subcommand_from info' -l help -d 'Print usage'
# inspect
complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image'
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.'
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s s -l size -d 'Display total file sizes if the type is container.'
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image"
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container"
# kill
complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container'
complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container"
# load
complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive'
complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN'
# login
complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Log in to a Docker registry server'
complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password'
complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username'
# logout
complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out from a Docker registry server'
# logs
complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container'
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output'
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps'
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp'
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)'
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container"
# port
complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port that is NAT-ed to PRIVATE_PORT'
complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container"
# pause
complete -c docker -f -n '__fish_docker_no_subcommand' -a pause -d 'Pause all processes within a container'
complete -c docker -A -f -n '__fish_seen_subcommand_from pause' -a '(__fish_print_docker_containers running)' -d "Container"
# ps
complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only containers created before Id or Name, including non-running ones.'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values. Valid filters:'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output"
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.'
# pull
complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server'
complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository'
complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image"
complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository"
# push
complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server'
complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image"
complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository"
# rename
complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container'
# restart
complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a container'
complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.'
complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container"
# rm
complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers'
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)'
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container'
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container"
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -a '(__fish_print_docker_containers all)' -d "Container"
# rmi
complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images'
complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image'
complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents'
complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image"
# run
complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-add -d 'Add Linux capabilities'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Drop Linux capabilities'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device-cgroup-rule -d 'Add a rule to the cgroup allowed devices list (e.g. --device-cgroup-rule="c 13:37 rwm")'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)"
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)"
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l group-add -d 'Add additional groups to run as'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of <name|id>:alias'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: <number>[<unit>], where unit = b, k, m or g)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g., 92:d0:c6:0a:29:33)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: <number>[<unit>], where unit = b, k, m or g)"
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mount -d 'Attach a filesystem mount to the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the Network mode for the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host"
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only"
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l stop-signal -d 'Signal to kill a container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l tmpfs -d 'Mount tmpfs on a directory'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image"
# save
complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive'
complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to a file, instead of STDOUT'
complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image"
# search
complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the registry (defaults to the Docker Hub)'
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds'
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output"
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only display results with at least x stars'
# start
complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a container'
complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process"
complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN"
complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container"
# stats
complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics"
complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result'
complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container"
# stop
complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a container'
complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.'
complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container"
# tag
complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository'
complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force'
complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage'
# top
complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container'
complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container"
# unpause
complete -c docker -f -n '__fish_docker_no_subcommand' -a unpause -d 'Unpause a paused container'
complete -c docker -A -f -n '__fish_seen_subcommand_from unpause' -a '(__fish_print_docker_containers running)' -d "Container"
# version
complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the Docker version information'
complete -c docker -A -f -n '__fish_seen_subcommand_from version' -s f -l format -d 'Format the output using the given go template'
complete -c docker -A -f -n '__fish_seen_subcommand_from version' -l help -d 'Print usage'
# wait
complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code'
complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container"


@ -0,0 +1 @@
See https://github.com/samneirinck/posh-docker


@ -0,0 +1,2 @@
Tianon Gravi <admwiggin@gmail.com> (@tianon)
Jessie Frazelle <jess@docker.com> (@jfrazelle)

File diff suppressed because it is too large

docs/README.md

@ -0,0 +1,30 @@
# The non-reference docs have been moved!
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
The documentation for Docker Engine has been merged into
[the general documentation repo](https://github.com/docker/docker.github.io).
See the [README](https://github.com/docker/docker.github.io/blob/master/README.md)
for instructions on contributing to and building the documentation.
If you'd like to edit the current published version of the Engine docs,
do it in the master branch here:
https://github.com/docker/docker.github.io/tree/master/engine
If you need to document the functionality of an upcoming Engine release,
use the `vnext-engine` branch:
https://github.com/docker/docker.github.io/tree/vnext-engine/engine
The reference docs have been left in docker/docker (this repo), which remains
the place to edit them.
The docs in the general repo are open-source and we appreciate
your feedback and pull requests!

docs/deprecated.md

@ -0,0 +1,321 @@
---
aliases: ["/engine/misc/deprecated/"]
title: "Deprecated Engine Features"
description: "Deprecated Features."
keywords: "docker, documentation, about, technology, deprecate"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# Deprecated Engine Features
The following features are deprecated in Engine.
To learn more about Docker Engine's deprecation policy,
see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy).
### Asynchronous `service create` and `service update`
**Deprecated In Release: v17.05.0**
**Disabled by default in release: v17.09**
Docker 17.05.0 added an optional `--detach=false` option to make the
`docker service create` and `docker service update` commands work synchronously. This
option will be enabled by default in Docker 17.09, at which point the `--detach`
flag can be used to restore the previous (asynchronous) behavior.
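As a rough sketch (the service name `web` and the `nginx` image are placeholders), the synchronous behavior looks like this:
```bash
# Block until the service converges instead of returning immediately.
docker service create --detach=false --name web nginx

# The same flag makes updates synchronous as well.
docker service update --detach=false --image nginx:alpine web
```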
### `-g` and `--graph` flags on `dockerd`
**Deprecated In Release: v17.05.0**
The `-g` or `--graph` flag for the `dockerd` or `docker daemon` command was
used to indicate the directory in which to store persistent data and resource
configuration and has been replaced with the more descriptive `--data-root`
flag.
These flags were added before Docker 1.0, so will not be _removed_, only
_hidden_, to discourage their use.
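For illustration only (the data directory below is a made-up path), a daemon invocation migrates from the hidden flag to the new one like so:
```bash
# Old, hidden form:
dockerd -g /mnt/docker-data

# Preferred, more descriptive form:
dockerd --data-root /mnt/docker-data
```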
### Top-level network properties in NetworkSettings
**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
**Target For Removal In Release: v17.12**
When inspecting a container, `NetworkSettings` contains top-level information
about the default ("bridge") network:
`EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`,
`IPPrefixLen`, `IPv6Gateway`, and `MacAddress`.
These properties are deprecated in favor of per-network properties in
`NetworkSettings.Networks`. These properties were already "deprecated" in
docker 1.9, but kept around for backward compatibility.
Refer to [#17538](https://github.com/docker/docker/pull/17538) for further
information.
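A minimal sketch of reading the per-network value instead of the deprecated top-level one, assuming a container named `my-container` attached to the default bridge network:
```bash
# Deprecated top-level property:
docker inspect --format '{{.NetworkSettings.IPAddress}}' my-container

# Per-network replacement under NetworkSettings.Networks:
docker inspect --format '{{.NetworkSettings.Networks.bridge.IPAddress}}' my-container
```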
### `filter` param for `/images/json` endpoint
**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
**Target For Removal In Release: v17.12**
The `filter` param to filter the list of images by reference (name or name:tag) is now implemented as a regular filter, named `reference`.
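For example, filtering the image list by reference (the image names below are illustrative):
```bash
# Exact reference:
docker images --filter reference='busybox:latest'

# Pattern matching on repository and tag:
docker images --filter reference='busy*:*'
```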
### `repository:shortid` image references
**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
**Target For Removal In Release: v17.12**
The `repository:shortid` syntax for referencing images is rarely used, collides with tag references, and can be confused with digest references.
### `docker daemon` subcommand
**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
**Target For Removal In Release: v17.12**
The daemon has been moved to a separate binary (`dockerd`), which should be used instead.
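A quick before/after sketch (the socket path shown is the conventional default):
```bash
# Deprecated subcommand:
docker daemon -H unix:///var/run/docker.sock

# Standalone daemon binary:
dockerd -H unix:///var/run/docker.sock
```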
### Duplicate keys with conflicting values in engine labels
**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
**Target For Removal In Release: v17.12**
Duplicate keys with conflicting values have been deprecated. A warning is displayed
in the output, and an error will be returned in the future.
### `MAINTAINER` in Dockerfile
**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
`MAINTAINER` was an early very limited form of `LABEL` which should be used instead.
### API calls without a version
**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
**Target For Removal In Release: v17.12**
API versions should be supplied to all API calls to ensure compatibility with
future Engine versions. Instead of just requesting, for example, the URL
`/containers/json`, you must now request `/v1.25/containers/json`.
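A hedged example using `curl` over the default Unix socket (the socket path and API version are assumptions; use the version your daemon reports):
```bash
# Unversioned request (deprecated):
curl --unix-socket /var/run/docker.sock http://localhost/containers/json

# Versioned request:
curl --unix-socket /var/run/docker.sock http://localhost/v1.25/containers/json
```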
### Backing filesystem without `d_type` support for overlay/overlay2
**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
**Target For Removal In Release: v17.12**
The overlay and overlay2 storage drivers do not work as expected if the backing
filesystem does not support `d_type`. For example, XFS does not support `d_type`
if it is formatted with the `ftype=0` option.
Please also refer to [#27358](https://github.com/docker/docker/issues/27358) for
further information.
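To check whether a backing XFS filesystem was created with `d_type` support (assuming `/var/lib/docker` is the daemon's data root):
```bash
# ftype=1 means d_type is supported; ftype=0 means overlay/overlay2 will misbehave.
xfs_info /var/lib/docker | grep ftype

# docker info also reports d_type support for the overlay drivers.
docker info | grep -i d_type
```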
### Three arguments form in `docker import`
**Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)**
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
The `docker import` command format `file|URL|- [REPOSITORY [TAG]]` has been deprecated since November 2013 and is no longer supported.
### `-h` shorthand for `--help`
**Deprecated In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
**Target For Removal In Release: v17.09**
The shorthand (`-h`) is less common than `--help` on Linux and cannot be used
on all subcommands (due to it conflicting with, e.g. `-h` / `--hostname` on
`docker create`). For this reason, the `-h` shorthand was not printed in the
"usage" output of subcommands, nor documented, and is now marked "deprecated".
### `-e` and `--email` flags on `docker login`
**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)**
**Target For Removal In Release: v17.06**
The docker login command is removing the ability to automatically register for an account with the target registry if the given username doesn't exist. Due to this change, the email flag is no longer required, and will be deprecated.
### Separator (`:`) of `--security-opt` flag on `docker run`
**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)**
**Target For Removal In Release: v17.06**
The `--security-opt` flag no longer uses the colon separator (`:`) to divide keys and values; it uses the equals sign (`=`) for consistency with other similar flags, like `--storage-opt`.
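A sketch of the change, using the SELinux `label` option as an example:
```bash
# Old colon-separated form (deprecated):
docker run --security-opt label:disable -it ubuntu bash

# New form with the equals sign:
docker run --security-opt label=disable -it ubuntu bash
```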
### `/containers/(id or name)/copy` endpoint
**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)**
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
The endpoint `/containers/(id or name)/copy` is deprecated in favor of `/containers/(id or name)/archive`.
### Ambiguous event fields in API
**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
The fields `ID`, `Status` and `From` in the events API have been deprecated in favor of a richer structure.
See the events API documentation for the new format.
### `-f` flag on `docker tag`
**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is no longer necessary to specify `-f` to move a tag from one image to another, nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use.
### HostConfig at API container start
**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
Passing a `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of
defining it at container creation (`POST /containers/create`).
### `--before` and `--since` flags on `docker ps`
**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
The `docker ps --before` and `docker ps --since` options are deprecated.
Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead.
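For instance, with a hypothetical container named `my-container`:
```bash
# Deprecated (and since removed) flags:
docker ps -a --before my-container
docker ps -a --since my-container

# Filter-based replacements:
docker ps -a --filter "before=my-container"
docker ps -a --filter "since=my-container"
```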
### `--automated` and `--stars` flags on `docker search`
**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
**Target For Removal In Release: v17.09**
The `docker search --automated` and `docker search --stars` options are deprecated.
Use `docker search --filter=is-automated=...` and `docker search --filter=stars=...` instead.
### Driver Specific Log Tags
**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)**
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
Log tags are now generated in a standard way across different logging drivers.
As a result, the driver-specific log tag options `syslog-tag`, `gelf-tag`, and
`fluentd-tag` have been deprecated in favor of the generic `tag` option.
```bash
{% raw %}
docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}"
{% endraw %}
```
### LXC built-in exec driver
**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)**
**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
The built-in LXC execution driver, the lxc-conf flag, and API fields have been removed.
### Old Command Line Options
**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)**
**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
The flags `-d` and `--daemon` are deprecated in favor of the `daemon` subcommand:
    docker daemon -H ...
The following single-dash (`-opt`) variants of certain command line options
are deprecated and replaced with double-dash options (`--opt`):
    docker attach -nostdin
    docker attach -sig-proxy
    docker build -no-cache
    docker build -rm
    docker commit -author
    docker commit -run
    docker events -since
    docker history -notrunc
    docker images -notrunc
    docker inspect -format
    docker ps -beforeId
    docker ps -notrunc
    docker ps -sinceId
    docker rm -link
    docker run -cidfile
    docker run -dns
    docker run -entrypoint
    docker run -expose
    docker run -link
    docker run -lxc-conf
    docker run -n
    docker run -privileged
    docker run -volumes-from
    docker search -notrunc
    docker search -stars
    docker search -t
    docker search -trusted
    docker tag -force
The following double-dash options are deprecated and have no replacement:
    docker run --cpuset
    docker run --networking
    docker ps --since-id
    docker ps --before-id
    docker search --trusted
**Deprecated In Release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)**
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
The single-dash (`-help`) form was removed in favor of the double-dash `--help`:
    docker -help
    docker [COMMAND] -help
### `--run` flag on docker commit
**Deprecated In Release: [v0.10.0](https://github.com/docker/docker/releases/tag/v0.10.0)**
**Removed In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
The `--run` flag of `docker commit` (and its short version `-run`) was deprecated in favor
of the `--changes` flag, which allows passing `Dockerfile` commands.
### Interacting with V1 registries
**Disabled By Default In Release: v17.06**
**Target For Removal In Release: v17.12**
Version 1.9 adds a flag (`--disable-legacy-registry=false`) which prevents the
docker daemon from `pull`, `push`, and `login` operations against v1
registries. Though enabled by default, this signals the intent to deprecate
the v1 protocol.
Support for the v1 protocol to the public registry was removed in 1.13. Any
mirror configurations using v1 should be updated to use a
[v2 registry mirror](https://docs.docker.com/registry/recipes/mirror/).
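For reference, the flag named above can be passed to the daemon to refuse v1 registries explicitly (a sketch, not the only way to configure it):
```bash
# Disable pull, push, and login against v1 registries.
dockerd --disable-legacy-registry
```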
### Docker Content Trust ENV passphrase variables name change
**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)**
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
Since 1.9, the Docker Content Trust Offline key has been renamed to Root key and the Tagging key has been renamed to Repository key. Due to this renaming, we're also changing the corresponding environment variables:
- DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE is now named DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE
- DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE is now named DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE
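For example, scripts exporting the old variables should switch to the new names (the passphrase values below are placeholders):
```bash
# Old names (no longer honored):
#   export DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE="..."
#   export DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE="..."

# New names:
export DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE="rootpassphrase"
export DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE="repopassphrase"
```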
### `--api-enable-cors` flag on dockerd
**Deprecated In Release: [v1.6.0](https://github.com/docker/docker/releases/tag/v1.6.0)**
**Target For Removal In Release: v17.09**
The flag `--api-enable-cors` has been deprecated since v1.6.0. Use the flag
`--api-cors-header` instead.
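A minimal sketch of the replacement (the origin is an example value):
```bash
# Deprecated:
dockerd --api-enable-cors

# Replacement, naming the allowed origin explicitly:
dockerd --api-cors-header="https://example.com"
```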

docs/extend/EBS_volume.md

@ -0,0 +1,164 @@
---
description: Volume plugin for Amazon EBS
keywords: "API, Usage, plugins, documentation, developer, amazon, ebs, rexray, volume"
title: Volume plugin for Amazon EBS
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# A proof-of-concept Rexray plugin
In this example, a simple Rexray plugin will be created for the purposes of using
it on an Amazon EC2 instance with EBS. It is not meant to be a complete Rexray plugin.
The example source is available at [https://github.com/tiborvass/rexray-plugin](https://github.com/tiborvass/rexray-plugin).
To learn more about Rexray: [https://github.com/codedellemc/rexray](https://github.com/codedellemc/rexray)
## 1. Make a Docker image
The following is the Dockerfile used to containerize rexray.
```Dockerfile
FROM debian:jessie
RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates
RUN wget https://dl.bintray.com/emccode/rexray/stable/0.6.4/rexray-Linux-x86_64-0.6.4.tar.gz -O rexray.tar.gz && tar -xvzf rexray.tar.gz -C /usr/bin && rm rexray.tar.gz
RUN mkdir -p /run/docker/plugins /var/lib/libstorage/volumes
ENTRYPOINT ["rexray"]
CMD ["--help"]
```
To build it you can run `image=$(cat Dockerfile | docker build -q -)` and `$image`
will reference the containerized rexray image.
## 2. Extract rootfs
```sh
$ TMPDIR=/tmp/rexray # for the purpose of this example
$ # create container without running it, to extract the rootfs from image
$ docker create --name rexray "$image"
$ # save the rootfs to a tar archive
$ docker export -o $TMPDIR/rexray.tar rexray
$ # extract rootfs from tar archive to a rootfs folder
$ ( mkdir -p $TMPDIR/rootfs; cd $TMPDIR/rootfs; tar xf ../rexray.tar )
```
## 3. Add plugin configuration
We have to put the following JSON to `$TMPDIR/config.json`:
```json
{
"Args": {
"Description": "",
"Name": "",
"Settable": null,
"Value": null
},
"Description": "A proof-of-concept EBS plugin (using rexray) for Docker",
"Documentation": "https://github.com/tiborvass/rexray-plugin",
"Entrypoint": [
"/usr/bin/rexray", "service", "start", "-f"
],
"Env": [
{
"Description": "",
"Name": "REXRAY_SERVICE",
"Settable": [
"value"
],
"Value": "ebs"
},
{
"Description": "",
"Name": "EBS_ACCESSKEY",
"Settable": [
"value"
],
"Value": ""
},
{
"Description": "",
"Name": "EBS_SECRETKEY",
"Settable": [
"value"
],
"Value": ""
}
],
"Interface": {
"Socket": "rexray.sock",
"Types": [
"docker.volumedriver/1.0"
]
},
"Linux": {
"AllowAllDevices": true,
"Capabilities": ["CAP_SYS_ADMIN"],
"Devices": null
},
"Mounts": [
{
"Source": "/dev",
"Destination": "/dev",
"Type": "bind",
"Options": ["rbind"]
}
],
"Network": {
"Type": "host"
},
"PropagatedMount": "/var/lib/libstorage/volumes",
"User": {},
"WorkDir": ""
}
```
Please note a few points:
- `PropagatedMount` is needed so that the docker daemon can see mounts done by the
rexray plugin from within the container, otherwise the docker daemon is not able
to mount a docker volume.
- The rexray plugin needs dynamic access to host devices. For that reason, we
have to give it access to all devices under `/dev` and set `AllowAllDevices` to
true for proper access.
- The user of this simple plugin can change only 3 settings: `REXRAY_SERVICE`,
`EBS_ACCESSKEY` and `EBS_SECRETKEY`. This is because of the reduced scope of this
plugin. Ideally other rexray parameters could also be set.
## 4. Create plugin
`docker plugin create tiborvass/rexray-plugin "$TMPDIR"` will create the plugin.
```sh
$ docker plugin ls
ID NAME DESCRIPTION ENABLED
2475a4bd0ca5 tiborvass/rexray-plugin:latest A rexray volume plugin for Docker false
```
## 5. Test plugin
```sh
$ docker plugin set tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY
$ docker plugin enable tiborvass/rexray-plugin
$ docker volume create -d tiborvass/rexray-plugin my-ebs-volume
$ docker volume ls
DRIVER VOLUME NAME
tiborvass/rexray-plugin:latest my-ebs-volume
$ docker run --rm -v my-ebs-volume:/volume busybox sh -c 'echo bye > /volume/hi'
$ docker run --rm -v my-ebs-volume:/volume busybox cat /volume/hi
bye
```
## 6. Push plugin
First, ensure you are logged in with `docker login`. Then you can run:
`docker plugin push tiborvass/rexray-plugin` to push it like a regular docker
image to a registry, to make it available for others to install via
`docker plugin install tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY`.

docs/extend/config.md

@ -0,0 +1,238 @@
---
title: "Plugin config"
description: "How develop and use a plugin with the managed plugin system"
keywords: "API, Usage, plugins, documentation, developer"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# Plugin Config Version 1 of Plugin V2
This document outlines the format of the V0 plugin configuration. The plugin
config described herein was introduced in the Docker daemon in the [v1.12.0
release](https://github.com/docker/docker/commit/f37117045c5398fd3dca8016ea8ca0cb47e7312b).
Plugin configs describe the various constituents of a docker plugin. Plugin
configs can be serialized to JSON format with the following media types:
Config Type | Media Type
------------- | -------------
config | "application/vnd.docker.plugin.v1+json"
## *Config* Field Descriptions
Config provides the base accessible fields for working with V0 plugin format
in the registry.
- **`description`** *string*
description of the plugin
- **`documentation`** *string*
link to the documentation about the plugin
- **`interface`** *PluginInterface*
interface implemented by the plugins, struct consisting of the following fields
- **`types`** *string array*
types indicate what interface(s) the plugin currently implements.
currently supported:
- **docker.volumedriver/1.0**
- **docker.networkdriver/1.0**
- **docker.ipamdriver/1.0**
- **docker.authz/1.0**
- **docker.logdriver/1.0**
- **docker.metricscollector/1.0**
- **`socket`** *string*
socket is the name of the socket the engine should use to communicate with the plugins.
the socket will be created in `/run/docker/plugins`.
- **`entrypoint`** *string array*
entrypoint of the plugin, see [`ENTRYPOINT`](../reference/builder.md#entrypoint)
- **`workdir`** *string*
workdir of the plugin, see [`WORKDIR`](../reference/builder.md#workdir)
- **`network`** *PluginNetwork*
network of the plugin, struct consisting of the following fields
- **`type`** *string*
network type.
currently supported:
- **bridge**
- **host**
- **none**
- **`mounts`** *PluginMount array*
mount of the plugin, struct consisting of the following fields, see [`MOUNTS`](https://github.com/opencontainers/runtime-spec/blob/master/config.md#mounts)
- **`name`** *string*
name of the mount.
- **`description`** *string*
description of the mount.
- **`source`** *string*
source of the mount.
- **`destination`** *string*
destination of the mount.
- **`type`** *string*
mount type.
- **`options`** *string array*
options of the mount.
- **`ipchost`** *boolean*
Access to host ipc namespace.
- **`pidhost`** *boolean*
Access to host pid namespace.
- **`propagatedMount`** *string*
path to be mounted as rshared, so that mounts under that path are visible to docker. This is useful for volume plugins.
This path will be bind-mounted outside of the plugin rootfs so its contents
are preserved on upgrade.
- **`env`** *PluginEnv array*
env of the plugin, struct consisting of the following fields
- **`name`** *string*
name of the env.
- **`description`** *string*
description of the env.
- **`value`** *string*
value of the env.
- **`args`** *PluginArgs*
args of the plugin, struct consisting of the following fields
- **`name`** *string*
name of the args.
- **`description`** *string*
description of the args.
- **`value`** *string array*
values of the args.
- **`linux`** *PluginLinux*
- **`capabilities`** *string array*
capabilities of the plugin (*Linux only*), see list [`here`](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md#security)
- **`allowAllDevices`** *boolean*
If `/dev` is bind mounted from the host, and allowAllDevices is set to true, the plugin will have `rwm` access to all devices on the host.
- **`devices`** *PluginDevice array*
device of the plugin (*Linux only*), struct consisting of the following fields, see [`DEVICES`](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#devices)
- **`name`** *string*
name of the device.
- **`description`** *string*
description of the device.
- **`path`** *string*
path of the device.
## Example Config
*Example showing the 'tiborvass/sample-volume-plugin' plugin config.*
```json
{
"Args": {
"Description": "",
"Name": "",
"Settable": null,
"Value": null
},
"Description": "A sample volume plugin for Docker",
"Documentation": "https://docs.docker.com/engine/extend/plugins/",
"Entrypoint": [
"/usr/bin/sample-volume-plugin",
"/data"
],
"Env": [
{
"Description": "",
"Name": "DEBUG",
"Settable": [
"value"
],
"Value": "0"
}
],
"Interface": {
"Socket": "plugin.sock",
"Types": [
"docker.volumedriver/1.0"
]
},
"Linux": {
"Capabilities": null,
"AllowAllDevices": false,
"Devices": null
},
"Mounts": null,
"Network": {
"Type": ""
},
"PropagatedMount": "/data",
"User": {},
"Workdir": ""
}
```
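For reference, the documented fields map onto a plain JSON document that can be parsed with standard tooling. The following Go sketch only illustrates that shape: the struct is hand-written from the field descriptions above (args, mounts and devices are omitted for brevity) and is not the type the Docker engine itself uses.
```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// PluginConfig is a partial, hand-written mirror of the documented config
// fields. JSON matching is case-insensitive, so it also reads the
// capitalized keys shown in the example output above.
type PluginConfig struct {
	Description   string   `json:"description"`
	Documentation string   `json:"documentation"`
	Entrypoint    []string `json:"entrypoint"`
	WorkDir       string   `json:"workdir"`
	Interface     struct {
		Types  []string `json:"types"`
		Socket string   `json:"socket"`
	} `json:"interface"`
	Network struct {
		Type string `json:"type"`
	} `json:"network"`
	PropagatedMount string `json:"propagatedMount"`
	Env             []struct {
		Name        string   `json:"name"`
		Description string   `json:"description"`
		Settable    []string `json:"settable"`
		Value       string   `json:"value"`
	} `json:"env"`
	Linux struct {
		Capabilities    []string `json:"capabilities"`
		AllowAllDevices bool     `json:"allowAllDevices"`
	} `json:"linux"`
}

func main() {
	data, err := os.ReadFile("config.json")
	if err != nil {
		panic(err)
	}
	var cfg PluginConfig
	if err := json.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("plugin %q exposes socket %s implementing %v\n",
		cfg.Description, cfg.Interface.Socket, cfg.Interface.Types)
}
```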
docs/extend/index.md Normal file
View File
@ -0,0 +1,262 @@
---
description: Develop and use a plugin with the managed plugin system
keywords: "API, Usage, plugins, documentation, developer"
title: Managed plugin system
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# Docker Engine managed plugin system
* [Installing and using a plugin](index.md#installing-and-using-a-plugin)
* [Developing a plugin](index.md#developing-a-plugin)
* [Debugging plugins](index.md#debugging-plugins)
Docker Engine's plugin system allows you to install, start, stop, and remove
plugins using Docker Engine.
For information about the legacy plugin system available in Docker Engine 1.12
and earlier, see [Understand legacy Docker Engine plugins](legacy_plugins.md).
> **Note**: Docker Engine managed plugins are currently not supported
on Windows daemons.
## Installing and using a plugin
Plugins are distributed as Docker images and can be hosted on Docker Hub or on
a private registry.
To install a plugin, use the `docker plugin install` command, which pulls the
plugin from Docker Hub or your private registry, prompts you to grant
permissions or capabilities if necessary, and enables the plugin.
To check the status of installed plugins, use the `docker plugin ls` command.
Plugins that start successfully are listed as enabled in the output.
After a plugin is installed, you can use it as an option for another Docker
operation, such as creating a volume.
In the following example, you install the `sshfs` plugin, verify that it is
enabled, and use it to create a volume.
> **Note**: This example is intended for instructional purposes only. Once the volume is created, your SSH password to the remote host will be exposed as plaintext when inspecting the volume. You should delete the volume as soon as you are done with the example.
1. Install the `sshfs` plugin.
```bash
$ docker plugin install vieux/sshfs
Plugin "vieux/sshfs" is requesting the following privileges:
- network: [host]
- capabilities: [CAP_SYS_ADMIN]
Do you grant the above permissions? [y/N] y
vieux/sshfs
```
The plugin requests 2 privileges:
- It needs access to the `host` network.
- It needs the `CAP_SYS_ADMIN` capability, which allows the plugin to run
the `mount` command.
2. Check that the plugin is enabled in the output of `docker plugin ls`.
```bash
$ docker plugin ls
ID NAME TAG DESCRIPTION ENABLED
69553ca1d789 vieux/sshfs latest the `sshfs` plugin true
```
3. Create a volume using the plugin.
This example mounts the `/remote` directory on host `1.2.3.4` into a
volume named `sshvolume`.
This volume can now be mounted into containers.
```bash
$ docker volume create \
-d vieux/sshfs \
--name sshvolume \
-o sshcmd=user@1.2.3.4:/remote \
-o password=$(cat file_containing_password_for_remote_host)
sshvolume
```
4. Verify that the volume was created successfully.
```bash
$ docker volume ls
DRIVER NAME
vieux/sshfs sshvolume
```
5. Start a container that uses the volume `sshvolume`.
```bash
$ docker run --rm -v sshvolume:/data busybox ls /data
<content of /remote on machine 1.2.3.4>
```
6. Remove the volume `sshvolume`.
```bash
$ docker volume rm sshvolume
sshvolume
```
To disable a plugin, use the `docker plugin disable` command. To completely
remove it, use the `docker plugin remove` command. For other available
commands and options, see the
[command line reference](../reference/commandline/index.md).
## Developing a plugin
#### The rootfs directory
The `rootfs` directory represents the root filesystem of the plugin. In this
example, it was created from a Dockerfile:
>**Note:** The `/run/docker/plugins` directory is mandatory inside of the
plugin's filesystem for docker to communicate with the plugin.
```bash
$ git clone https://github.com/vieux/docker-volume-sshfs
$ cd docker-volume-sshfs
$ docker build -t rootfsimage .
$ id=$(docker create rootfsimage true) # id was cd851ce43a403 when the image was created
$ sudo mkdir -p myplugin/rootfs
$ sudo docker export "$id" | sudo tar -x -C myplugin/rootfs
$ docker rm -vf "$id"
$ docker rmi rootfsimage
```
#### The config.json file
The `config.json` file describes the plugin. See the [plugins config reference](config.md).
Consider the following `config.json` file.
```json
{
"description": "sshFS plugin for Docker",
"documentation": "https://docs.docker.com/engine/extend/plugins/",
"entrypoint": ["/docker-volume-sshfs"],
"network": {
"type": "host"
},
"interface" : {
"types": ["docker.volumedriver/1.0"],
"socket": "sshfs.sock"
},
"linux": {
"capabilities": ["CAP_SYS_ADMIN"]
}
}
```
This plugin is a volume driver. It requires a `host` network and the
`CAP_SYS_ADMIN` capability. It depends upon the `/docker-volume-sshfs`
entrypoint and uses the `/run/docker/plugins/sshfs.sock` socket to communicate
with Docker Engine. This plugin has no runtime parameters.
#### Creating the plugin
A new plugin can be created by running
`docker plugin create <plugin-name> ./path/to/plugin/data`, where the plugin
data contains a plugin configuration file `config.json` and a root filesystem
in the `rootfs` subdirectory.
After that, the plugin `<plugin-name>` shows up in `docker plugin ls`.
Plugins can be pushed to remote registries with
`docker plugin push <plugin-name>`.
## Debugging plugins
Stdout of a plugin is redirected to dockerd logs. Such entries have a
`plugin=<ID>` suffix. Here are a few examples of commands for plugin ID
`f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62` and their
corresponding log entries in the docker daemon logs.
```bash
$ docker plugin install tiborvass/sample-volume-plugins
INFO[0036] Starting... Found 0 volumes on startup plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
```
```bash
$ docker volume create -d tiborvass/sample-volume-plugins samplevol
INFO[0193] Create Called... Ensuring directory /data/samplevol exists on host... plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
INFO[0193] open /var/lib/docker/plugin-data/local-persist.json: no such file or directory plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
INFO[0193] Created volume samplevol with mountpoint /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
INFO[0193] Path Called... Returned path /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
```
```bash
$ docker run -v samplevol:/tmp busybox sh
INFO[0421] Get Called... Found samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
INFO[0421] Mount Called... Mounted samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
INFO[0421] Path Called... Returned path /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
INFO[0421] Unmount Called... Unmounted samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
```
#### Using docker-runc to obtain logfiles and shell into the plugin.
`docker-runc`, the default docker container runtime, can be used for debugging
plugins. This is specifically useful to collect plugin logs if they are
redirected to a file.
```bash
$ docker-runc list
ID PID STATUS BUNDLE CREATED
f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 2679 running /run/docker/libcontainerd/f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 2017-02-06T21:53:03.031537592Z
```
```bash
$ docker-runc exec f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 cat /var/log/plugin.log
```
If the plugin has a built-in shell, then exec into the plugin can be done as
follows:
```bash
$ docker-runc exec -t f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 sh
```
#### Using curl to debug plugin socket issues.
To verify whether the plugin API socket that the docker daemon communicates with
is responsive, use curl. In this example, we make API calls from the
docker host to volume and network plugins using curl 7.47.0 to ensure that
the plugin is listening on that socket. For a well-functioning plugin,
these basic requests should work. Note that plugin sockets are available on the host under `/var/run/docker/plugins/<pluginID>`.
```bash
curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/e8a37ba56fc879c991f7d7921901723c64df6b42b87e6a0b055771ecf8477a6d/plugin.sock http:/VolumeDriver.List
{"Mountpoint":"","Err":"","Volumes":[{"Name":"myvol1","Mountpoint":"/data/myvol1"},{"Name":"myvol2","Mountpoint":"/data/myvol2"}],"Volume":null}
```
```bash
curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/45e00a7ce6185d6e365904c8bcf62eb724b1fe307e0d4e7ecc9f6c1eb7bcdb70/plugin.sock http:/NetworkDriver.GetCapabilities
{"Scope":"local"}
```
When using curl 7.5 and above, the URL should be of the form
`http://hostname/APICall`, where `hostname` is the valid hostname where the
plugin is installed and `APICall` is the call to the plugin API.
For example, `http://localhost/VolumeDriver.List`
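The same kind of check can be scripted. Below is a rough Go sketch that performs the `VolumeDriver.List` call over a plugin socket from the host, equivalent to the curl command above; the socket path is a placeholder and must be replaced with the real `<pluginID>` path under `/var/run/docker/plugins`.
```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"strings"
)

func main() {
	// Placeholder path: substitute the real <pluginID> directory.
	socket := "/var/run/docker/plugins/<pluginID>/plugin.sock"

	// HTTP client that dials the plugin's UNIX socket instead of TCP.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", socket)
			},
		},
	}

	// The host part of the URL is ignored; the path selects the API call.
	resp, err := client.Post("http://plugin/VolumeDriver.List",
		"application/json", strings.NewReader("{}"))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```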
View File
@ -0,0 +1,101 @@
---
redirect_from:
- "/engine/extend/plugins/"
title: "Use Docker Engine plugins"
description: "How to add additional functionality to Docker with plugins extensions"
keywords: "Examples, Usage, plugins, docker, documentation, user guide"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
This document describes the Docker Engine plugins generally available in Docker
Engine. To view information on plugins managed by Docker,
refer to [Docker Engine plugin system](index.md).
You can extend the capabilities of the Docker Engine by loading third-party
plugins. This page explains the types of plugins and provides links to several
volume and network plugins for Docker.
## Types of plugins
Plugins extend Docker's functionality. They come in specific types. For
example, a [volume plugin](plugins_volume.md) might enable Docker
volumes to persist across multiple Docker hosts and a
[network plugin](plugins_network.md) might provide network plumbing.
Currently Docker supports authorization, volume and network driver plugins. In the future it
will support additional plugin types.
## Installing a plugin
Follow the instructions in the plugin's documentation.
## Finding a plugin
The sections below provide a non-exhaustive overview of available plugins.
<style>
#DocumentationText tr td:first-child { white-space: nowrap;}
</style>
### Network plugins
Plugin | Description
----------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
[Contiv Networking](https://github.com/contiv/netplugin) | An open source network plugin to provide infrastructure and security policies for a multi-tenant micro services deployment, while providing an integration to physical network for non-container workload. Contiv Networking implements the remote driver and IPAM APIs available in Docker 1.9 onwards.
[Kuryr Network Plugin](https://github.com/openstack/kuryr) | A network plugin developed as part of the OpenStack Kuryr project that implements the Docker networking (libnetwork) remote driver API by utilizing Neutron, the OpenStack networking service. It includes an IPAM driver as well.
[Weave Network Plugin](https://www.weave.works/docs/net/latest/introducing-weave/) | A network plugin that creates a virtual network that connects your Docker containers - across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure and work in partially connected networks, and other adverse environments - all configured with delightful simplicity.
### Volume plugins
Plugin | Description
----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
[Azure File Storage plugin](https://github.com/Azure/azurefile-dockervolumedriver) | Lets you mount Microsoft [Azure File Storage](https://azure.microsoft.com/blog/azure-file-storage-now-generally-available/) shares to Docker containers as volumes using the SMB 3.0 protocol. [Learn more](https://azure.microsoft.com/blog/persistent-docker-volumes-with-azure-file-storage/).
[BeeGFS Volume Plugin](https://github.com/RedCoolBeans/docker-volume-beegfs) | An open source volume plugin to create persistent volumes in a BeeGFS parallel file system.
[Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume) | A volume plugin that provides access to an extensible set of container-based persistent storage options. It supports single and multi-host Docker environments with features that include tenant isolation, automated provisioning, encryption, secure deletion, snapshots and QoS.
[Contiv Volume Plugin](https://github.com/contiv/volplugin) | An open source volume plugin that provides multi-tenant, persistent, distributed storage with intent based consumption. It has support for Ceph and NFS.
[Convoy plugin](https://github.com/rancher/convoy) | A volume plugin for a variety of storage back-ends including device mapper and NFS. It's a simple standalone executable written in Go and provides the framework to support vendor-specific extensions such as snapshots, backups and restore.
[DigitalOcean Block Storage plugin](https://github.com/omallo/docker-volume-plugin-dostorage) | Integrates DigitalOcean's [block storage solution](https://www.digitalocean.com/products/storage/) into the Docker ecosystem by automatically attaching a given block storage volume to a DigitalOcean droplet and making the contents of the volume available to Docker containers running on that droplet.
[DRBD plugin](https://www.drbd.org/en/supported-projects/docker) | A volume plugin that provides highly available storage replicated by [DRBD](https://www.drbd.org). Data written to the docker volume is replicated in a cluster of DRBD nodes.
[Flocker plugin](https://clusterhq.com/docker-plugin/) | A volume plugin that provides multi-host portable volumes for Docker, enabling you to run databases and other stateful containers and move them around across a cluster of machines.
[Fuxi Volume Plugin](https://github.com/openstack/fuxi) | A volume plugin that is developed as part of the OpenStack Kuryr project and implements the Docker volume plugin API by utilizing Cinder, the OpenStack block storage service.
[gce-docker plugin](https://github.com/mcuadros/gce-docker) | A volume plugin able to attach, format and mount Google Compute [persistent-disks](https://cloud.google.com/compute/docs/disks/persistent-disks).
[GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) | A volume plugin that provides multi-host volumes management for Docker using GlusterFS.
[Horcrux Volume Plugin](https://github.com/muthu-r/horcrux) | A volume plugin that allows on-demand, version controlled access to your data. Horcrux is an open-source plugin, written in Go, and supports SCP, [Minio](https://www.minio.io) and Amazon S3.
[HPE 3Par Volume Plugin](https://github.com/hpe-storage/python-hpedockerplugin/) | A volume plugin that supports HPE 3Par and StoreVirtual iSCSI storage arrays.
[IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume.
[Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) | A plugin that provides credentials and secret management using Keywhiz as a central repository.
[Local Persist Plugin](https://github.com/CWSpear/local-persist) | A volume plugin that extends the default `local` driver's functionality by allowing you to specify a mountpoint anywhere on the host, which enables the files to *always persist*, even if the volume is removed via `docker volume rm`.
[NetApp Plugin](https://github.com/NetApp/netappdvp) (nDVP) | A volume plugin that provides direct integration with the Docker ecosystem for the NetApp storage portfolio. The nDVP package supports the provisioning and management of storage resources from the storage platform to Docker hosts, with a robust framework for adding additional platforms in the future.
[Netshare plugin](https://github.com/ContainX/docker-volume-netshare) | A volume plugin that provides volume management for NFS 3/4, AWS EFS and CIFS file systems.
[Nimble Storage Volume Plugin](https://connect.nimblestorage.com/community/app-integration/docker)| A volume plug-in that integrates with Nimble Storage Unified Flash Fabric arrays. The plug-in abstracts array volume capabilities to the Docker administrator to allow self-provisioning of secure multi-tenant volumes and clones.
[OpenStorage Plugin](https://github.com/libopenstorage/openstorage) | A cluster-aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few.
[Portworx Volume Plugin](https://github.com/portworx/px-dev) | A volume plugin that turns any server into a scale-out converged compute/storage node, providing container granular storage and highly available volumes across any node, using a shared-nothing storage backend that works with any docker scheduler.
[Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform.
[REX-Ray plugin](https://github.com/emccode/rexray) | A volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC.
[Virtuozzo Storage and Ploop plugin](https://github.com/virtuozzo/docker-volume-ploop) | A volume plugin with support for Virtuozzo Storage distributed cloud file system as well as ploop devices.
[VMware vSphere Storage Plugin](https://github.com/vmware/docker-volume-vsphere) | Docker Volume Driver for vSphere enables customers to address persistent storage requirements for Docker containers in vSphere environments.
### Authorization plugins
Plugin | Description
------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
[Twistlock AuthZ Broker](https://github.com/twistlock/authz) | A basic extendable authorization plugin that runs directly on the host or inside a container. This plugin allows you to define user policies that it evaluates during authorization. Basic authorization is provided if Docker daemon is started with the --tlsverify flag (username is extracted from the certificate common name).
[HBM plugin](https://github.com/kassisol/hbm) | An authorization plugin that prevents executing commands with certain parameters.
## Troubleshooting a plugin
If you are having problems with Docker after loading a plugin, ask the authors
of the plugin for help. The Docker team may not be able to assist you.
## Writing a plugin
If you are interested in writing a plugin for Docker, or seeing how they work
under the hood, see the [docker plugins reference](plugin_api.md).
docs/extend/plugin_api.md Normal file
View File
@ -0,0 +1,196 @@
---
title: "Plugins API"
description: "How to write Docker plugins extensions "
keywords: "API, Usage, plugins, documentation, developer"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# Docker Plugin API
Docker plugins are out-of-process extensions which add capabilities to the
Docker Engine.
This document describes the Docker Engine plugin API. To view information on
plugins managed by Docker Engine, refer to [Docker Engine plugin system](index.md).
This page is intended for people who want to develop their own Docker plugin.
If you just want to learn about or use Docker plugins, look
[here](legacy_plugins.md).
## What plugins are
A plugin is a process running on the same or a different host as the docker daemon,
which registers itself by placing a file on the same docker host in one of the plugin
directories described in [Plugin discovery](#plugin-discovery).
Plugins have human-readable names, which are short, lowercase strings. For
example, `flocker` or `weave`.
Plugins can run inside or outside containers. Currently running them outside
containers is recommended.
## Plugin discovery
Docker discovers plugins by looking for them in the plugin directory whenever a
user or container tries to use one by name.
There are three types of files which can be put in the plugin directory.
* `.sock` files are UNIX domain sockets.
* `.spec` files are text files containing a URL, such as `unix:///other.sock` or `tcp://localhost:8080`.
* `.json` files are text files containing a full json specification for the plugin.
Plugins with UNIX domain socket files must run on the same docker host, whereas
plugins with spec or json files can run on a different host if a remote URL is specified.
UNIX domain socket files must be located under `/run/docker/plugins`, whereas
spec files can be located either under `/etc/docker/plugins` or `/usr/lib/docker/plugins`.
The name of the file (excluding the extension) determines the plugin name.
For example, the `flocker` plugin might create a UNIX socket at
`/run/docker/plugins/flocker.sock`.
You can define each plugin in a separate subdirectory if you want to isolate definitions from each other.
For example, you can create the `flocker` socket under `/run/docker/plugins/flocker/flocker.sock` and only
mount `/run/docker/plugins/flocker` inside the `flocker` container.
Docker always searches for unix sockets in `/run/docker/plugins` first. It checks for spec or json files under
`/etc/docker/plugins` and `/usr/lib/docker/plugins` if the socket doesn't exist. The directory scan stops as
soon as it finds the first plugin definition with the given name.
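As an illustration of this lookup order (not the engine's actual implementation), the following Go sketch checks the documented locations for a plugin definition and stops at the first match:
```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// findPlugin mimics the documented search order for a plugin name:
// a UNIX socket under /run/docker/plugins (optionally in its own
// subdirectory), then .spec/.json files under /etc/docker/plugins
// and /usr/lib/docker/plugins.
func findPlugin(name string) (string, error) {
	candidates := []string{
		filepath.Join("/run/docker/plugins", name+".sock"),
		filepath.Join("/run/docker/plugins", name, name+".sock"),
		filepath.Join("/etc/docker/plugins", name+".spec"),
		filepath.Join("/etc/docker/plugins", name+".json"),
		filepath.Join("/usr/lib/docker/plugins", name+".spec"),
		filepath.Join("/usr/lib/docker/plugins", name+".json"),
	}
	for _, p := range candidates {
		if _, err := os.Stat(p); err == nil {
			return p, nil // stop at the first definition found
		}
	}
	return "", fmt.Errorf("plugin %q not found", name)
}

func main() {
	if p, err := findPlugin("flocker"); err == nil {
		fmt.Println("found plugin definition at", p)
	}
}
```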
### JSON specification
This is the JSON format for a plugin:
```json
{
"Name": "plugin-example",
"Addr": "https://example.com/docker/plugin",
"TLSConfig": {
"InsecureSkipVerify": false,
"CAFile": "/usr/shared/docker/certs/example-ca.pem",
"CertFile": "/usr/shared/docker/certs/example-cert.pem",
"KeyFile": "/usr/shared/docker/certs/example-key.pem"
}
}
```
The `TLSConfig` field is optional and TLS will only be verified if this configuration is present.
## Plugin lifecycle
Plugins should be started before Docker, and stopped after Docker. For
example, when packaging a plugin for a platform which supports `systemd`, you
might use [`systemd` dependencies](
http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to
manage startup and shutdown order.
When upgrading a plugin, you should first stop the Docker daemon, upgrade the
plugin, then start Docker again.
## Plugin activation
When a plugin is first referred to -- either by a user referring to it by name
(e.g. `docker run --volume-driver=foo`) or a container already configured to
use a plugin being started -- Docker looks for the named plugin in the plugin
directory and activates it with a handshake. See Handshake API below.
Plugins are *not* activated automatically at Docker daemon startup. Rather,
they are activated only lazily, or on-demand, when they are needed.
## Systemd socket activation
Plugins may also be socket activated by `systemd`. The official [Plugins helpers](https://github.com/docker/go-plugins-helpers)
natively support socket activation. In order for a plugin to be socket activated, it needs
a `service` file and a `socket` file.
The `service` file (for example `/lib/systemd/system/your-plugin.service`):
```
[Unit]
Description=Your plugin
Before=docker.service
After=network.target your-plugin.socket
Requires=your-plugin.socket docker.service
[Service]
ExecStart=/usr/lib/docker/your-plugin
[Install]
WantedBy=multi-user.target
```
The `socket` file (for example `/lib/systemd/system/your-plugin.socket`):
```
[Unit]
Description=Your plugin
[Socket]
ListenStream=/run/docker/plugins/your-plugin.sock
[Install]
WantedBy=sockets.target
```
This allows plugins to actually be started when the Docker daemon connects to
the sockets they're listening on (for instance the first time the daemon uses them
or if one of the plugins goes down accidentally).
## API design
The Plugin API is RPC-style JSON over HTTP, much like webhooks.
Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to
implement an HTTP server and bind this to the UNIX socket mentioned in the
"plugin discovery" section.
All requests are HTTP `POST` requests.
The API is versioned via an Accept header, which currently is always set to
`application/vnd.docker.plugins.v1+json`.
## Handshake API
Plugins are activated via the following "handshake" API call.
### /Plugin.Activate
**Request:** empty body
**Response:**
```
{
"Implements": ["VolumeDriver"]
}
```
Responds with a list of Docker subsystems which this plugin implements.
After activation, the plugin will then be sent events from these subsystems.
Possible values are:
* [`authz`](plugins_authorization.md)
* [`NetworkDriver`](plugins_network.md)
* [`VolumeDriver`](plugins_volume.md)
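To make the handshake concrete, here is a minimal, unofficial Go sketch of a plugin that listens on a socket under `/run/docker/plugins` and answers `/Plugin.Activate`. The socket name `myvolume.sock` is only an example, and real plugins would typically build on the [docker/go-plugins-helpers](https://github.com/docker/go-plugins-helpers) SDK instead.
```go
package main

import (
	"encoding/json"
	"log"
	"net"
	"net/http"
)

func main() {
	// The file name (minus the extension) becomes the plugin name;
	// "myvolume" here is a hypothetical example.
	l, err := net.Listen("unix", "/run/docker/plugins/myvolume.sock")
	if err != nil {
		log.Fatal(err)
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
		// Announce which subsystems this plugin implements.
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		json.NewEncoder(w).Encode(map[string][]string{
			"Implements": {"VolumeDriver"},
		})
	})
	// The VolumeDriver.* endpoints would be registered on the same mux.

	log.Fatal(http.Serve(l, mux))
}
```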
## Plugin retries
Attempts to call a method on a plugin are retried with an exponential backoff
for up to 30 seconds. This may help when packaging plugins as containers, since
it gives plugin containers a chance to start up before failing any user
containers which depend on them.
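The behaviour can be pictured roughly as the loop below. This is a generic illustration of exponential backoff capped at about 30 seconds, not the engine's actual retry code.
```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// callWithBackoff retries fn with exponentially growing delays until
// roughly 30 seconds have elapsed. Purely illustrative.
func callWithBackoff(fn func() error) error {
	deadline := time.Now().Add(30 * time.Second)
	delay := 100 * time.Millisecond
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Now().Add(delay).After(deadline) {
			return fmt.Errorf("giving up: %w", err)
		}
		time.Sleep(delay)
		delay *= 2
	}
}

func main() {
	err := callWithBackoff(func() error {
		return errors.New("plugin not ready yet")
	})
	fmt.Println(err)
}
```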
## Plugins helpers
To ease plugin development, we provide an SDK for each kind of plugin
currently supported by Docker at [docker/go-plugins-helpers](https://github.com/docker/go-plugins-helpers).
View File
@ -0,0 +1,260 @@
---
title: "Access authorization plugin"
description: "How to create authorization plugins to manage access control to your Docker daemon."
keywords: "security, authorization, authentication, docker, documentation, plugin, extend"
redirect_from:
- "/engine/extend/authorization/"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# Create an authorization plugin
This document describes the Docker Engine plugins generally available in Docker
Engine. To view information on plugins managed by Docker Engine,
refer to [Docker Engine plugin system](index.md).
Docker's out-of-the-box authorization model is all or nothing. Any user with
permission to access the Docker daemon can run any Docker client command. The
same is true for callers using Docker's Engine API to contact the daemon. If you
require greater access control, you can create authorization plugins and add
them to your Docker daemon configuration. Using an authorization plugin, a
Docker administrator can configure granular access policies for managing access
to the Docker daemon.
Anyone with the appropriate skills can develop an authorization plugin. These
skills, at their most basic, are knowledge of Docker, understanding of REST, and
sound programming knowledge. This document describes the architecture, state,
and methods information available to an authorization plugin developer.
## Basic principles
Docker's [plugin infrastructure](plugin_api.md) enables
extending Docker by loading, removing and communicating with
third-party components using a generic API. The access authorization subsystem
was built using this mechanism.
Using this subsystem, you don't need to rebuild the Docker daemon to add an
authorization plugin. You can add a plugin to an installed Docker daemon. You do
need to restart the Docker daemon to add a new plugin.
An authorization plugin approves or denies requests to the Docker daemon based
on both the current authentication context and the command context. The
authentication context contains all user details and the authentication method.
The command context contains all the relevant request data.
Authorization plugins must follow the rules described in [Docker Plugin API](plugin_api.md).
Each plugin must reside within directories described under the
[Plugin discovery](plugin_api.md#plugin-discovery) section.
**Note**: the abbreviations `AuthZ` and `AuthN` mean authorization and authentication
respectively.
## Default user authorization mechanism
If TLS is enabled in the [Docker daemon](https://docs.docker.com/engine/security/https/), the default user authorization flow extracts the user details from the certificate subject name.
That is, the `User` field is set to the client certificate subject common name, and the `AuthenticationMethod` field is set to `TLS`.
## Basic architecture
You are responsible for registering your plugin as part of the Docker daemon
startup. You can install multiple plugins and chain them together. This chain
can be ordered. Each request to the daemon passes in order through the chain.
Only when all the plugins grant access to the resource is the access granted.
When an HTTP request is made to the Docker daemon through the CLI or via the
Engine API, the authentication subsystem passes the request to the installed
authentication plugin(s). The request contains the user (caller) and command
context. The plugin is responsible for deciding whether to allow or deny the
request.
The sequence diagrams below depict an allow and deny authorization flow:
![Authorization Allow flow](images/authz_allow.png)
![Authorization Deny flow](images/authz_deny.png)
Each request sent to the plugin includes the authenticated user, the HTTP
headers, and the request/response body. Only the user name and the
authentication method used are passed to the plugin. Most importantly, no user
credentials or tokens are passed. Finally, not all request/response bodies
are sent to the authorization plugin. Only those request/response bodies where
the `Content-Type` is either `text/*` or `application/json` are sent.
For commands that can potentially hijack the HTTP connection (`HTTP
Upgrade`), such as `exec`, the authorization plugin is only called for the
initial HTTP requests. Once the plugin approves the command, authorization is
not applied to the rest of the flow. Specifically, the streaming data is not
passed to the authorization plugins. For commands that return chunked HTTP
response, such as `logs` and `events`, only the HTTP request is sent to the
authorization plugins.
During request/response processing, some authorization flows might
need to do additional queries to the Docker daemon. To complete such flows,
plugins can call the daemon API similar to a regular user. To enable these
additional queries, the plugin must provide the means for an administrator to
configure proper authentication and security policies.
## Docker client flows
To enable and configure the authorization plugin, the plugin developer must
support the Docker client interactions detailed in this section.
### Setting up Docker daemon
Enable the authorization plugin with a dedicated command line flag in the
`--authorization-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID`
value. This value can be the plugin's socket or a path to a specification file.
Authorization plugins can be loaded without restarting the daemon. Refer
to the [`dockerd` documentation](../reference/commandline/dockerd.md#configuration-reloading) for more information.
```bash
$ dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
```
Docker's authorization subsystem supports multiple `--authorization-plugin` parameters.
### Calling authorized command (allow)
```bash
$ docker pull centos
...
f1b10cd84249: Pull complete
...
```
### Calling unauthorized command (deny)
```bash
$ docker pull centos
...
docker: Error response from daemon: authorization denied by plugin PLUGIN_NAME: volumes are not allowed.
```
### Error from plugins
```bash
$ docker pull centos
...
docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZPlugin.AuthZReq: Cannot connect to the Docker daemon. Is the docker daemon running on this host?.
```
## API schema and implementation
In addition to Docker's standard plugin registration method, each plugin
should implement the following two methods:
* `/AuthZPlugin.AuthZReq` This authorize request method is called before the Docker daemon processes the client request.
* `/AuthZPlugin.AuthZRes` This authorize response method is called before the response is returned from Docker daemon to the client.
#### /AuthZPlugin.AuthZReq
**Request**:
```json
{
"User": "The user identification",
"UserAuthNMethod": "The authentication method used",
"RequestMethod": "The HTTP method",
"RequestURI": "The HTTP request URI",
"RequestBody": "Byte array containing the raw HTTP request body",
"RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string "
}
```
**Response**:
```json
{
"Allow": "Determined whether the user is allowed or not",
"Msg": "The authorization message",
"Err": "The error message if things go wrong"
}
```
#### /AuthZPlugin.AuthZRes
**Request**:
```json
{
"User": "The user identification",
"UserAuthNMethod": "The authentication method used",
"RequestMethod": "The HTTP method",
"RequestURI": "The HTTP request URI",
"RequestBody": "Byte array containing the raw HTTP request body",
"RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string",
"ResponseBody": "Byte array containing the raw HTTP response body",
"ResponseHeader": "Byte array containing the raw HTTP response header as a map[string][]string",
"ResponseStatusCode":"Response status code"
}
```
**Response**:
```json
{
"Allow": "Determined whether the user is allowed or not",
"Msg": "The authorization message",
"Err": "The error message if things go wrong"
}
```
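To show what handling these messages looks like in practice, here is a hedged Go sketch of an authorization plugin that denies all `DELETE` requests and allows everything else. The struct fields mirror the JSON bodies above (request body and headers are omitted for brevity), and the socket name `deny-delete` is a hypothetical example; real plugins would usually use the [docker/go-plugins-helpers](https://github.com/docker/go-plugins-helpers) authorization helpers.
```go
package main

import (
	"encoding/json"
	"log"
	"net"
	"net/http"
)

// authZRequest mirrors part of the documented request bodies
// (RequestBody and RequestHeader are omitted for brevity).
type authZRequest struct {
	User            string
	UserAuthNMethod string
	RequestMethod   string
	RequestURI      string
}

// authZResponse mirrors the documented response body.
type authZResponse struct {
	Allow bool   `json:"Allow"`
	Msg   string `json:"Msg,omitempty"`
	Err   string `json:"Err,omitempty"`
}

func main() {
	mux := http.NewServeMux()

	// Activation handshake: advertise the authz subsystem.
	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
		json.NewEncoder(w).Encode(map[string][]string{"Implements": {"authz"}})
	})

	// Request authorization: deny DELETE requests, allow everything else.
	mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) {
		var req authZRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			json.NewEncoder(w).Encode(authZResponse{Err: err.Error()})
			return
		}
		if req.RequestMethod == "DELETE" {
			json.NewEncoder(w).Encode(authZResponse{Allow: false, Msg: "deletes are not allowed"})
			return
		}
		json.NewEncoder(w).Encode(authZResponse{Allow: true})
	})

	// Response authorization: always allow in this sketch.
	mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) {
		json.NewEncoder(w).Encode(authZResponse{Allow: true})
	})

	// The socket name determines the name used with --authorization-plugin.
	l, err := net.Listen("unix", "/run/docker/plugins/deny-delete.sock")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.Serve(l, mux))
}
```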
### Request authorization
Each plugin must support two request authorization message formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message.
#### Daemon -> Plugin
Name | Type | Description
-----------------------|-------------------|-------------------------------------------------------
User | string | The user identification
Authentication method | string | The authentication method used
Request method | enum | The HTTP method (GET/DELETE/POST)
Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json)
Request headers | map[string]string | Request headers as key value pairs (without the authorization header)
Request body | []byte | Raw request body
#### Plugin -> Daemon
Name | Type | Description
--------|--------|----------------------------------------------------------------------------------
Allow | bool | Boolean value indicating whether the request is allowed or denied
Msg | string | Authorization message (will be returned to the client in case the access is denied)
Err | string | Error message (will be returned to the client in case the plugin encounters an error. The string value supplied may appear in logs, so should not include confidential information)
### Response authorization
The plugin must support two authorization message formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message.
#### Daemon -> Plugin
Name | Type | Description
----------------------- |------------------ |----------------------------------------------------
User | string | The user identification
Authentication method | string | The authentication method used
Request method | string | The HTTP method (GET/DELETE/POST)
Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json)
Request headers | map[string]string | Request headers as key value pairs (without the authorization header)
Request body | []byte | Raw request body
Response status code | int | Status code from the docker daemon
Response headers | map[string]string | Response headers as key value pairs
Response body | []byte | Raw docker daemon response body
#### Plugin -> Daemon
Name | Type | Description
--------|--------|----------------------------------------------------------------------------------
Allow | bool | Boolean value indicating whether the response is allowed or denied
Msg | string | Authorization message (will be returned to the client in case the access is denied)
Err | string | Error message (will be returned to the client in case the plugin encounters an error. The string value supplied may appear in logs, so should not include confidential information)
View File
@ -0,0 +1,403 @@
---
title: "Graphdriver plugins"
description: "How to manage image and container filesystems with external plugins"
keywords: "Examples, Usage, storage, image, docker, data, graph, plugin, api"
advisory: experimental
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
## Changelog
### 1.13.0
- Support v2 plugins
# Docker graph driver plugins
Docker graph driver plugins enable admins to use an external/out-of-process
graph driver with Docker Engine. This is an alternative to using the
built-in storage drivers, such as aufs/overlay/devicemapper/btrfs.
You need to install and enable the plugin and then restart the Docker daemon
before using the plugin. See the following example for the correct ordering
of steps.
```
$ docker plugin install cpuguy83/docker-overlay2-graphdriver-plugin # this command also enables the driver
<output suppressed>
$ pkill dockerd
$ dockerd --experimental -s cpuguy83/docker-overlay2-graphdriver-plugin
```
# Write a graph driver plugin
See the [plugin documentation](https://docs.docker.com/engine/extend/) for detailed information
on the underlying plugin protocol.
## Graph Driver plugin protocol
If a plugin registers itself as a `GraphDriver` when activated, then it is
expected to provide the rootfs for containers as well as image layer storage.
### /GraphDriver.Init
**Request**:
```json
{
"Home": "/graph/home/path",
"Opts": [],
"UIDMaps": [],
"GIDMaps": []
}
```
Initialize the graph driver plugin with a home directory and array of options.
These are passed through from the user, but the plugin is not required to parse
or honor them.
The request also includes a list of UID and GID mappings, structured as follows:
```json
{
"ContainerID": 0,
"HostID": 0,
"Size": 0
}
```
**Response**:
```json
{
"Err": ""
}
```
Respond with a non-empty string error if an error occurred.
### /GraphDriver.Capabilities
**Request**:
```json
{}
```
Get behavioral characteristics of the graph driver. If a plugin does not handle
this request, the engine will use default values for all capabilities.
**Response**:
```json
{
"ReproducesExactDiffs": false,
}
```
Respond with values of capabilities:
* **ReproducesExactDiffs** Defaults to false. Flags that this driver is capable
of reproducing exactly equivalent diffs for read-only filesystem layers.
### /GraphDriver.Create
**Request**:
```json
{
"ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
"Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142",
"MountLabel": "",
"StorageOpt": {}
}
```
Create a new, empty, read-only filesystem layer with the specified
`ID`, `Parent` and `MountLabel`. If `Parent` is an empty string, there is no
parent layer. `StorageOpt` is a map of strings which indicates storage options.
**Response**:
```json
{
"Err": ""
}
```
Respond with a non-empty string error if an error occurred.
### /GraphDriver.CreateReadWrite
**Request**:
```json
{
"ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
"Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142",
"MountLabel": "",
"StorageOpt": {}
}
```
Similar to `/GraphDriver.Create` but creates a read-write filesystem layer.
### /GraphDriver.Remove
**Request**:
```json
{
"ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
}
```
Remove the filesystem layer with the given `ID`.
**Response**:
```json
{
"Err": ""
}
```
Respond with a non-empty string error if an error occurred.
### /GraphDriver.Get
**Request**:
```json
{
"ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
"MountLabel": ""
}
```
Get the mountpoint for the layered filesystem referred to by the given `ID`.
**Response**:
```json
{
"Dir": "/var/mygraph/46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
"Err": ""
}
```
Respond with the absolute path to the mounted layered filesystem.
Respond with a non-empty string error if an error occurred.
### /GraphDriver.Put
**Request**:
```json
{
"ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
}
```
Release the system resources for the specified `ID`, such as unmounting the
filesystem layer.
**Response**:
```json
{
"Err": ""
}
```
Respond with a non-empty string error if an error occurred.
### /GraphDriver.Exists
**Request**:
```json
{
"ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
}
```
Determine if a filesystem layer with the specified `ID` exists.
**Response**:
```json
{
"Exists": true
}
```
Respond with a boolean for whether or not the filesystem layer with the specified
`ID` exists.
### /GraphDriver.Status
**Request**:
```json
{}
```
Get low-level diagnostic information about the graph driver.
**Response**:
```json
{
"Status": [[]]
}
```
Respond with a 2-D array with key/value pairs for the underlying status
information.
### /GraphDriver.GetMetadata
**Request**:
```json
{
"ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
}
```
Get low-level diagnostic information about the layered filesystem with the
specified `ID`.
**Response**:
```json
{
"Metadata": {},
"Err": ""
}
```
Respond with a set of key/value pairs containing the low-level diagnostic
information about the layered filesystem.
Respond with a non-empty string error if an error occurred.
### /GraphDriver.Cleanup
**Request**:
```json
{}
```
Perform necessary tasks to release resources held by the plugin, such as
unmounting all the layered file systems.
**Response**:
```json
{
"Err": ""
}
```
Respond with a non-empty string error if an error occurred.
### /GraphDriver.Diff
**Request**:
```json
{
"ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
"Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"
}
```
Get an archive of the changes between the filesystem layers specified by the `ID`
and `Parent`. `Parent` may be an empty string, in which case there is no parent.
**Response**:
```
{% raw %}
{{ TAR STREAM }}
{% endraw %}
```
### /GraphDriver.Changes
**Request**:
```json
{
"ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
"Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"
}
```
Get a list of changes between the filesystem layers specified by the `ID` and
`Parent`. If `Parent` is an empty string, there is no parent.
**Response**:
```json
{
"Changes": [{}],
"Err": ""
}
```
Respond with a list of changes. The structure of a change is:
```json
"Path": "/some/path",
"Kind": 0,
```
Where the `Path` is the filesystem path within the layered filesystem that is
changed and `Kind` is an integer specifying the type of change that occurred:
- 0 - Modified
- 1 - Added
- 2 - Deleted
Respond with a non-empty string error if an error occurred.
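In Go terms, a change entry and the `Kind` values above could be modelled as follows; the names are illustrative and taken only from the fields and numeric values documented here:
```go
package main

import (
	"encoding/json"
	"fmt"
)

// Kind values as documented: 0=Modified, 1=Added, 2=Deleted.
const (
	ChangeModify = 0
	ChangeAdd    = 1
	ChangeDelete = 2
)

// Change mirrors the documented change structure.
type Change struct {
	Path string `json:"Path"`
	Kind int    `json:"Kind"`
}

// ChangesResponse mirrors the /GraphDriver.Changes response body.
type ChangesResponse struct {
	Changes []Change `json:"Changes"`
	Err     string   `json:"Err"`
}

func main() {
	resp := ChangesResponse{
		Changes: []Change{{Path: "/some/path", Kind: ChangeAdd}},
	}
	out, _ := json.Marshal(resp)
	fmt.Println(string(out)) // {"Changes":[{"Path":"/some/path","Kind":1}],"Err":""}
}
```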
### /GraphDriver.ApplyDiff
**Request**:
```
{% raw %}
{{ TAR STREAM }}
{% endraw %}
```
Extract the changeset from the given diff into the layer with the specified `ID`
and `Parent`.
**Query Parameters**:
- id (required) - the `ID` of the new filesystem layer to extract the diff to
- parent (required) - the `Parent` of the given `ID`
**Response**:
```json
{
"Size": 512366,
"Err": ""
}
```
Respond with the size of the new layer in bytes.
Respond with a non-empty string error if an error occurred.
### /GraphDriver.DiffSize
**Request**:
```json
{
"ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
"Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"
}
```
Calculate the changes between the specified `ID` and `Parent`.
**Response**:
```json
{
"Size": 512366,
"Err": ""
}
```
Respond with the size of the changes between the specified `ID` and `Parent`.
Respond with a non-empty string error if an error occurred.
View File
@ -0,0 +1,220 @@
---
title: "Docker log driver plugins"
description: "Log driver plugins."
keywords: "Examples, Usage, plugins, docker, documentation, user guide, logging"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# Logging driver plugins
This document describes logging driver plugins for Docker.
Logging drivers enable users to forward container logs to another service for
processing. Docker includes several logging drivers as built-ins, but it can
never hope to support all use cases with built-in drivers alone. Plugins allow Docker
to support a wide range of logging services without having to embed client
libraries for these services in the main Docker codebase. See the
[plugin documentation](legacy_plugins.md) for more information.
## Create a logging plugin
The main interface for logging plugins uses the same JSON+HTTP RPC protocol used
by other plugin types. See the
[example](https://github.com/cpuguy83/docker-log-driver-test) plugin for a
reference implementation of a logging plugin. The example wraps the built-in
`jsonfilelog` log driver.
## LogDriver protocol
Logging plugins must register as a `LogDriver` during plugin activation. Once
activated, users can specify the plugin as a log driver.
There are two HTTP endpoints that logging plugins must implement:
### `/LogDriver.StartLogging`
Signals to the plugin that a container is starting and that the plugin should
start receiving logs for it.
Logs will be streamed over the defined file in the request. On Linux this file
is a FIFO. Logging plugins are not currently supported on Windows.
**Request**:
```json
{
"File": "/path/to/file/stream",
"Info": {
"ContainerID": "123456"
}
}
```
`File` is the path to the log stream that needs to be consumed. Each call to
`StartLogging` should provide a different file path, even for a container
that the plugin has already received logs for. The file is created by
docker with a randomly generated name.
`Info` contains details about the container that's being logged. This is fairly
free-form, but is defined by the following struct definition:
```go
type Info struct {
Config map[string]string
ContainerID string
ContainerName string
ContainerEntrypoint string
ContainerArgs []string
ContainerImageID string
ContainerImageName string
ContainerCreated time.Time
ContainerEnv []string
ContainerLabels map[string]string
LogPath string
DaemonName string
}
```
`ContainerID` will always be supplied with this struct, but other fields may be
empty or missing.
**Response**
```json
{
"Err": ""
}
```
If an error occurred during this request, add an error message to the `Err` field
in the response. If there is no error, you can either send an empty response (`{}`)
or an empty value for the `Err` field.
The driver should at this point be consuming log messages from the passed-in file.
If messages are not consumed, the container may block while trying to
write to its stdio streams.
Log stream messages are encoded as protocol buffers. The protobuf definitions are
in the
[docker repository](https://github.com/docker/docker/blob/master/api/types/plugins/logdriver/entry.proto).
Since protocol buffers are not self-delimited, you must decode them from the stream
using the following stream format:
```
[size][message]
```
Where `size` is a 4-byte big endian binary encoded uint32. `size` in this case
defines the size of the next message. `message` is the actual log entry.
A reference golang implementation of a stream encoder/decoder can be found
[here](https://github.com/docker/docker/blob/master/api/types/plugins/logdriver/io.go)
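For illustration, a minimal Go sketch of the framing decode is shown below: it reads the 4-byte big endian length, then that many bytes of message. Decoding the protobuf-encoded entry itself is left as a comment, since the generated entry type lives in the docker repository linked above; the FIFO path is a placeholder.
```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"os"
)

// readFrames consumes [size][message] frames from r until EOF.
// Each message is a protobuf-encoded log entry (see entry.proto in the
// docker repository); here it is handed off as raw bytes.
func readFrames(r io.Reader, handle func(msg []byte) error) error {
	var sizeBuf [4]byte
	for {
		if _, err := io.ReadFull(r, sizeBuf[:]); err != nil {
			if errors.Is(err, io.EOF) {
				return nil // clean end of stream
			}
			return err
		}
		size := binary.BigEndian.Uint32(sizeBuf[:])
		msg := make([]byte, size)
		if _, err := io.ReadFull(r, msg); err != nil {
			return err
		}
		if err := handle(msg); err != nil {
			return err
		}
	}
}

func main() {
	// Placeholder: in a real plugin this is the FIFO path received in
	// the LogDriver.StartLogging request.
	f, err := os.Open("/path/to/file/stream")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	err = readFrames(f, func(msg []byte) error {
		// proto.Unmarshal(msg, &entry) would decode the actual log entry here.
		fmt.Printf("got %d-byte log entry\n", len(msg))
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```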
### `/LogDriver.StopLogging`
Signals to the plugin to stop collecting logs from the defined file.
Once a response is received, the file will be removed by Docker. Make sure
to collect all logs on the stream before responding to this request, or you risk
losing log data.
A request on this endpoint does not mean that the container has been removed,
only that it has stopped.
**Request**:
```json
{
"File": "/path/to/file/stream"
}
```
**Response**:
```json
{
"Err": ""
}
```
If an error occurred during this request, add an error message to the `Err` field
in the response. If there is no error, you can either send an empty response (`{}`)
or an empty value for the `Err` field.
## Optional endpoints
Logging plugins can implement two extra logging endpoints:
### `/LogDriver.Capabilities`
Defines the capabilities of the log driver. You must implement this endpoint for
Docker to be able to take advantage of any of the defined capabilities.
**Request**:
```json
{}
```
**Response**:
```json
{
"ReadLogs": true
}
```
Supported capabilities:
- `ReadLogs` - this tells Docker that the plugin is capable of reading back logs
to clients. Plugins that report that they support `ReadLogs` must implement the
`/LogDriver.ReadLogs` endpoint
### `/LogDriver.ReadLogs`
Reads back logs to the client. This is used when `docker logs <container>` is
called.
In order for Docker to use this endpoint, the plugin must specify as much when
`/LogDriver.Capabilities` is called.
**Request**:
```json
{
"ReadConfig": {},
"Info": {
"ContainerID": "123456"
}
}
```
`ReadConfig` is the list of options for reading; it is defined by the following
golang struct:
```go
type ReadConfig struct {
Since time.Time
Tail int
Follow bool
}
```
- `Since` defines the oldest log that should be sent.
- `Tail` defines the number of lines to read (e.g. like the command `tail -n 10`)
- `Follow` signals that the client wants to stay attached to receive new log messages
as they come in once the existing logs have been read.
`Info` is the same type defined in `/LogDriver.StartLogging`. It should be used
to determine what set of logs to read.
**Response**:
```
{{ log stream }}
```
The response should be the encoded log messages, using the same stream format as
the messages that the plugin consumed from Docker.
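A minimal sketch of the corresponding encoder side, assuming the plugin has already serialized each entry to bytes, simply mirrors that 4-byte big-endian length prefix:

```go
package logstream

import (
	"encoding/binary"
	"io"
)

// writeLogMessage frames one serialized log entry with a 4-byte
// big-endian length prefix, mirroring the stream format that the
// plugin consumed from Docker.
func writeLogMessage(w io.Writer, raw []byte) error {
	sizeBuf := make([]byte, 4)
	binary.BigEndian.PutUint32(sizeBuf, uint32(len(raw)))
	if _, err := w.Write(sizeBuf); err != nil {
		return err
	}
	_, err := w.Write(raw)
	return err
}
```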

---
title: "Docker metrics collector plugins"
description: "Metrics plugins."
keywords: "Examples, Usage, plugins, docker, documentation, user guide, metrics"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# Metrics Collector Plugins
Docker exposes internal metrics based on the Prometheus format. Metrics plugins
enable accessing these metrics in a consistent way by providing a Unix
socket at a predefined path where the plugin can scrape the metrics.
> **Note**: While the plugin interface for metrics is non-experimental, the naming
> of the metrics and metric labels is still considered experimental and may change
> in a future version.
## Creating a metrics plugin
You must currently set `PropagatedMount` in the plugin `config.json` to
`/run/docker`. This allows the plugin to receive updated mounts
(the bind-mounted socket) from Docker after the plugin is already configured.
## MetricsCollector protocol
Metrics plugins must register as implementing the `MetricsCollector` interface
in `config.json`.
On Unix platforms, the socket is located at `/run/docker/metrics.sock` in the
plugin's rootfs.
`MetricsCollector` must implement two endpoints:
### `MetricsCollector.StartMetrics`
Signals to the plugin that the metrics socket is now available for scraping.
**Request**
```json
{}
```
The request has no payload.
**Response**
```json
{
"Err": ""
}
```
If an error occurred during this request, add an error message to the `Err` field
in the response. If there is no error, you can either send an empty response (`{}`)
or an empty value for the `Err` field. Errors will only be logged.
### `MetricsCollector.StopMetrics`
Signals to the plugin that the metrics socket is no longer available.
This may happen when the daemon is shutting down.
**Request**
```json
{}
```
The request has no payload.
**Response**
```json
{
"Err": ""
}
```
If an error occurred during this request, add an error message to the `Err` field
in the response. If there is no error, you can either send an empty response (`{}`)
or an empty value for the `Err` field. Errors will only be logged.
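As a sketch of what a plugin might do with the socket between `StartMetrics` and `StopMetrics`, the Go snippet below scrapes it over HTTP. The `/metrics` path is an assumption based on the daemon's Prometheus endpoint rather than something this protocol defines:

```go
package main

import (
	"context"
	"io"
	"log"
	"net"
	"net/http"
	"os"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			// Always dial the bind-mounted Unix socket instead of TCP.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/run/docker/metrics.sock")
			},
		},
	}
	// The host in the URL is ignored; the transport above decides
	// where the connection actually goes. The "/metrics" path is an
	// assumption, not part of the MetricsCollector protocol.
	resp, err := client.Get("http://localhost/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// Prometheus text-format metrics.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}
```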

---
title: "Docker network driver plugins"
description: "Network driver plugins."
keywords: "Examples, Usage, plugins, docker, documentation, user guide"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# Engine network driver plugins
This document describes Docker Engine network driver plugins generally
available in Docker Engine. To view information on plugins
managed by Docker Engine, refer to [Docker Engine plugin system](index.md).
Docker Engine network plugins enable Engine deployments to be extended to
support a wide range of networking technologies, such as VXLAN, IPVLAN, MACVLAN
or something completely different. Network driver plugins are supported via the
LibNetwork project. Each plugin is implemented as a "remote driver" for
LibNetwork, which shares plugin infrastructure with Engine. Effectively, network
driver plugins are activated in the same way as other plugins, and use the same
kind of protocol.
## Network driver plugins and swarm mode
Docker 1.12 adds support for cluster management and orchestration called
[swarm mode](https://docs.docker.com/engine/swarm/). Docker Engine running in swarm mode currently
only supports the built-in overlay driver for networking. Therefore existing
networking plugins will not work in swarm mode.
When you run Docker Engine outside of swarm mode, all networking plugins that
worked in Docker 1.11 will continue to function normally. They do not require
any modification.
## Using network driver plugins
The means of installing and running a network driver plugin depend on the
particular plugin. So, be sure to install your plugin according to the
instructions obtained from the plugin developer.
Once running, however, network driver plugins are used just like the built-in
network drivers: by being mentioned as a driver in network-oriented Docker
commands. For example,

    $ docker network create --driver weave mynet

Some network driver plugins are listed in [plugins](legacy_plugins.md).
The `mynet` network is now owned by `weave`, so subsequent commands
referring to that network will be sent to the plugin,

    $ docker run --network=mynet busybox top
## Write a network plugin
Network plugins implement the [Docker plugin
API](plugin_api.md) and the network plugin protocol.
## Network plugin protocol
The network driver protocol, in addition to the plugin activation call, is
documented as part of libnetwork:
[https://github.com/docker/libnetwork/blob/master/docs/remote.md](https://github.com/docker/libnetwork/blob/master/docs/remote.md).
## Related Information
To interact with the Docker maintainers and other interested users, see the IRC channel `#docker-network`.
- [Docker networks feature overview](https://docs.docker.com/engine/userguide/networking/)
- The [LibNetwork](https://github.com/docker/libnetwork) project

---
description: Using services with plugins
keywords: "API, Usage, plugins, documentation, developer"
title: Plugins and Services
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# Using Volume and Network plugins in Docker services
In swarm mode, it is possible to create a service that allows for attaching
to networks or mounting volumes that are backed by plugins. Swarm schedules
services based on plugin availability on a node.
### Volume plugins
In this example, a volume plugin is installed on a swarm worker and a volume
is created using the plugin. In the manager, a service is created with the
relevant mount options. It can be observed that the service is scheduled to
run on the worker node that has the volume plugin and volume. Note that
node1 is the manager and node2 is the worker.
1. Prepare manager. In node 1:
```bash
$ docker swarm init
Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager.
```
2. Join swarm, install plugin and create volume on worker. In node 2:
```bash
$ docker swarm join \
--token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
192.168.99.100:2377
```
```bash
$ docker plugin install tiborvass/sample-volume-plugin
latest: Pulling from tiborvass/sample-volume-plugin
eb9c16fbdc53: Download complete
Digest: sha256:00b42de88f3a3e0342e7b35fa62394b0a9ceb54d37f4c50be5d3167899994639
Status: Downloaded newer image for tiborvass/sample-volume-plugin:latest
Installed plugin tiborvass/sample-volume-plugin
```
```bash
$ docker volume create -d tiborvass/sample-volume-plugin --name pluginVol
```
3. Create a service using the plugin and volume. In node1:
```bash
$ docker service create --name my-service --mount type=volume,volume-driver=tiborvass/sample-volume-plugin,source=pluginVol,destination=/tmp busybox top
$ docker service ls
z1sj8bb8jnfn my-service replicated 1/1 busybox:latest
```
`docker service ls` shows that 1 instance of the service is running.
4. Observe the task getting scheduled in node 2:
```bash
{% raw %}
$ docker ps --format '{{.ID}}\t {{.Status}} {{.Names}} {{.Command}}'
83fc1e842599 Up 2 days my-service.1.9jn59qzn7nbc3m0zt1hij12xs "top"
{% endraw %}
```
### Network plugins
In this example, a global scope network plugin is installed on both the
swarm manager and worker. A service is created with replicated instances
using the installed plugin. We will observe how the availability of the
plugin determines network creation and container scheduling.
Note that node1 is the manager and node2 is the worker.
1. Install a global scoped network plugin on both manager and worker. On node1
and node2:
```bash
$ docker plugin install bboreham/weave2
Plugin "bboreham/weave2" is requesting the following privileges:
- network: [host]
- capabilities: [CAP_SYS_ADMIN CAP_NET_ADMIN]
Do you grant the above permissions? [y/N] y
latest: Pulling from bboreham/weave2
7718f575adf7: Download complete
Digest: sha256:2780330cc15644b60809637ee8bd68b4c85c893d973cb17f2981aabfadfb6d72
Status: Downloaded newer image for bboreham/weave2:latest
Installed plugin bboreham/weave2
```
2. Create a network using plugin on manager. On node1:
```bash
$ docker network create --driver=bboreham/weave2:latest globalnet
$ docker network ls
NETWORK ID NAME DRIVER SCOPE
qlj7ueteg6ly globalnet bboreham/weave2:latest swarm
```
3. Create a service on the manager and have replicas set to 8. Observe that
containers get scheduled on both manager and worker.
On node 1:
```bash
$ docker service create --network globalnet --name myservice --replicas=8 mrjana/simpleweb simpleweb
w90drnfzw85nygbie9kb89vpa
```
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
87520965206a mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.4.ytdzpktmwor82zjxkh118uf1v
15e24de0f7aa mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.2.kh7a9n3iauq759q9mtxyfs9hp
c8c8f0144cdc mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.6.sjhpj5gr3xt33e3u2jycoj195
2e8e4b2c5c08 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.8.2z29zowsghx66u2velublwmrh
```
On node 2:
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
53c0ae7c1dae mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up Less than a second myservice.7.x44tvvdm3iwkt9kif35f7ykz1
9b56c627fee0 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up Less than a second myservice.1.x7n1rm6lltw5gja3ueikze57q
d4f5927ba52c mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up 1 second myservice.5.i97bfo9uc6oe42lymafs9rz6k
478c0d395bd7 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up Less than a second myservice.3.yr7nkffa48lff1vrl2r1m1ucs
```
4. Scale down the number of instances. On node1:
```bash
$ docker service scale myservice=0
myservice scaled to 0
```
5. Disable and uninstall the plugin on the worker. On node2:
```bash
$ docker plugin rm -f bboreham/weave2
bboreham/weave2
```
6. Scale up the number of instances again. Observe that all containers are
scheduled on the manager and not on the worker, because the plugin is not available on the worker anymore.
On node 1:
```bash
$ docker service scale myservice=8
myservice scaled to 8
```
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
cf4b0ec2415e mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.3.r7p5o208jmlzpcbm2ytl3q6n1
57c64a6a2b88 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.4.dwoezsbb02ccstkhlqjy2xe7h
3ac68cc4e7b8 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 35 seconds myservice.5.zx4ezdrm2nwxzkrwnxthv0284
006c3cb318fc mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.8.q0e3umt19y3h3gzo1ty336k5r
dd2ffebde435 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.7.a77y3u22prjipnrjg7vzpv3ba
a86c74d8b84b mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.6.z9nbn14bagitwol1biveeygl7
2846a7850ba0 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 37 seconds myservice.2.ypufz2eh9fyhppgb89g8wtj76
e2ec01efcd8a mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 38 seconds myservice.1.8w7c4ttzr6zcb9sjsqyhwp3yl
```
On node 2:
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
```

---
title: "Volume plugins"
description: "How to manage data with external volume plugins"
keywords: "Examples, Usage, volume, docker, data, volumes, plugin, api"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# Write a volume plugin
Docker Engine volume plugins enable Engine deployments to be integrated with
external storage systems such as Amazon EBS, and enable data volumes to persist
beyond the lifetime of a single Docker host. See the
[plugin documentation](legacy_plugins.md) for more information.
## Changelog
### 1.13.0
- If used as part of the v2 plugin architecture, mountpoints that are part of
paths returned by the plugin must be mounted under the directory specified by
`PropagatedMount` in the plugin configuration
([#26398](https://github.com/docker/docker/pull/26398))
### 1.12.0
- Add `Status` field to `VolumeDriver.Get` response
([#21006](https://github.com/docker/docker/pull/21006#))
- Add `VolumeDriver.Capabilities` to get capabilities of the volume driver
([#22077](https://github.com/docker/docker/pull/22077))
### 1.10.0
- Add `VolumeDriver.Get` which gets the details about the volume
([#16534](https://github.com/docker/docker/pull/16534))
- Add `VolumeDriver.List` which lists all volumes owned by the driver
([#16534](https://github.com/docker/docker/pull/16534))
### 1.8.0
- Initial support for volume driver plugins
([#14659](https://github.com/docker/docker/pull/14659))
## Command-line changes
To give a container access to a volume, use the `--volume` and `--volume-driver`
flags on the `docker container run` command. The `--volume` (or `-v`) flag
accepts a volume name and path on the host, and the `--volume-driver` flag
accepts a driver type.
```bash
$ docker volume create --driver=flocker volumename
$ docker container run -it --volume volumename:/data busybox sh
```
### `--volume`
The `--volume` (or `-v`) flag takes a value that is in the format
`<volume_name>:<mountpoint>`. The two parts of the value are
separated by a colon (`:`) character.
- The volume name is a human-readable name for the volume, and cannot begin with
a `/` character. It is referred to as `volume_name` in the rest of this topic.
- The `Mountpoint` is the path on the host (v1) or in the plugin (v2) where the
volume has been made available.
### `volumedriver`
Specifying a `volumedriver` in conjunction with a `volumename` allows you to
use plugins such as [Flocker](https://github.com/ScatterHQ/flocker) to manage
volumes external to a single host, such as those on EBS.
## Create a VolumeDriver
The container creation endpoint (`/containers/create`) accepts a `VolumeDriver`
field of type `string`, allowing you to specify the name of the driver. If not
specified, it defaults to `"local"` (the default driver for local volumes).
## Volume plugin protocol
If a plugin registers itself as a `VolumeDriver` when activated, it must
provide the Docker daemon with writable paths on the host filesystem. The Docker
daemon provides these paths to containers to consume. The Docker daemon makes
the volumes available by bind-mounting the provided paths into the containers.
> **Note**: Volume plugins should *not* write data to the `/var/lib/docker/`
> directory, including `/var/lib/docker/volumes`. The `/var/lib/docker/`
> directory is reserved for Docker.
### `/VolumeDriver.Create`
**Request**:
```json
{
"Name": "volume_name",
"Opts": {}
}
```
Instruct the plugin that the user wants to create a volume, given a user
specified volume name. The plugin does not need to actually manifest the
volume on the filesystem yet (until `Mount` is called).
`Opts` is a map of driver specific options passed through from the user request.
**Response**:
```json
{
"Err": ""
}
```
Respond with a string error if an error occurred.
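As a rough sketch of how such an endpoint could be wired up in Go, the handler below decodes the request and answers with the `Err` field described above. The plugin socket path and the `createVolume` hook are hypothetical placeholders; see the [plugin API](plugin_api.md) for how plugins are expected to serve HTTP:

```go
package main

import (
	"encoding/json"
	"log"
	"net"
	"net/http"
)

type createRequest struct {
	Name string
	Opts map[string]string
}

type driverResponse struct {
	Err string
}

// createVolume is a hypothetical hook into the storage backend.
func createVolume(name string, opts map[string]string) error { return nil }

func main() {
	http.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
		var req createRequest
		resp := driverResponse{}
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			resp.Err = err.Error()
		} else if err := createVolume(req.Name, req.Opts); err != nil {
			resp.Err = err.Error()
		}
		// An empty Err means success.
		json.NewEncoder(w).Encode(resp)
	})

	// The socket path below is a placeholder for the plugin's socket.
	l, err := net.Listen("unix", "/run/docker/plugins/sample.sock")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.Serve(l, nil))
}
```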
### `/VolumeDriver.Remove`
**Request**:
```json
{
"Name": "volume_name"
}
```
Delete the specified volume from disk. This request is issued when a user
invokes `docker rm -v` to remove volumes associated with a container.
**Response**:
```json
{
"Err": ""
}
```
Respond with a string error if an error occurred.
### `/VolumeDriver.Mount`
**Request**:
```json
{
"Name": "volume_name",
"ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c"
}
```
Docker requires the plugin to provide a volume, given a user-specified volume
name. `Mount` is called once per container start. If the same `volume_name` is requested
more than once, the plugin may need to keep track of each new mount request and provision
at the first mount request and deprovision at the last corresponding unmount request
(a reference-counting sketch follows this endpoint's description).
`ID` is a unique ID for the caller that is requesting the mount.
**Response**:
- **v1**:
```json
{
"Mountpoint": "/path/to/directory/on/host",
"Err": ""
}
```
- **v2**:
```json
{
"Mountpoint": "/path/under/PropagatedMount",
"Err": ""
}
```
`Mountpoint` is the path on the host (v1) or in the plugin (v2) where the volume
has been made available.
`Err` is either empty or contains an error string.
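The mount reference counting described above might be sketched as follows; `provision` and `deprovision` are hypothetical stand-ins for whatever the storage backend actually does:

```go
package volumedriver

import "sync"

// provision and deprovision are hypothetical hooks into the storage backend.
func provision(name string) error   { return nil }
func deprovision(name string) error { return nil }

// refCounts tracks which caller IDs currently have each volume mounted.
type refCounts struct {
	mu     sync.Mutex
	mounts map[string]map[string]struct{} // volume name -> set of caller IDs
}

func (rc *refCounts) Mount(name, id string) error {
	rc.mu.Lock()
	defer rc.mu.Unlock()
	if rc.mounts == nil {
		rc.mounts = make(map[string]map[string]struct{})
	}
	if len(rc.mounts[name]) == 0 {
		// First mount request for this volume: provision it.
		if err := provision(name); err != nil {
			return err
		}
		rc.mounts[name] = make(map[string]struct{})
	}
	rc.mounts[name][id] = struct{}{}
	return nil
}

func (rc *refCounts) Unmount(name, id string) error {
	rc.mu.Lock()
	defer rc.mu.Unlock()
	delete(rc.mounts[name], id)
	if len(rc.mounts[name]) == 0 {
		// Last corresponding unmount: safe to deprovision.
		return deprovision(name)
	}
	return nil
}
```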
### `/VolumeDriver.Path`
**Request**:
```json
{
"Name": "volume_name"
}
```
Request the path to the volume with the given `volume_name`.
**Response**:
- **v1**:
```json
{
"Mountpoin": "/path/to/directory/on/host",
"Err": ""
}
```
- **v2**:
```json
{
"Mountpoint": "/path/under/PropagatedMount",
"Err": ""
}
```
Respond with the path on the host (v1) or inside the plugin (v2) where the
volume has been made available, and/or a string error if an error occurred.
`Mountpoint` is optional. However, the plugin may be queried again later if one
is not provided.
### `/VolumeDriver.Unmount`
**Request**:
```json
{
"Name": "volume_name",
"ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c"
}
```
Docker is no longer using the named volume. `Unmount` is called once per
container stop. The plugin may deduce that it is safe to deprovision the volume at
this point.
`ID` is a unique ID for the caller that is requesting the mount.
**Response**:
```json
{
"Err": ""
}
```
Respond with a string error if an error occurred.
### `/VolumeDriver.Get`
**Request**:
```json
{
"Name": "volume_name"
}
```
Get info about `volume_name`.
**Response**:
- **v1**:
```json
{
"Volume": {
"Name": "volume_name",
"Mountpoint": "/path/to/directory/on/host",
"Status": {}
},
"Err": ""
}
```
- **v2**:
```json
{
"Volume": {
"Name": "volume_name",
"Mountpoint": "/path/under/PropagatedMount",
"Status": {}
},
"Err": ""
}
```
Respond with a string error if an error occurred. `Mountpoint` and `Status` are
optional.
### `/VolumeDriver.List`
**Request**:
```json
{}
```
Get the list of volumes registered with the plugin.
**Response**:
- **v1**:
```json
{
"Volumes": [
{
"Name": "volume_name",
"Mountpoint": "/path/to/directory/on/host"
}
],
"Err": ""
}
```
- **v2**:
```json
{
"Volumes": [
{
"Name": "volume_name",
"Mountpoint": "/path/under/PropagatedMount"
}
],
"Err": ""
}
```
Respond with a string error if an error occurred. `Mountpoint` is optional.
### `/VolumeDriver.Capabilities`
**Request**:
```json
{}
```
Get the list of capabilities the driver supports.
The driver is not required to implement `Capabilities`. If it is not
implemented, the default values are used.
**Response**:
```json
{
"Capabilities": {
"Scope": "global"
}
}
```
Supported scopes are `global` and `local`. Any other value in `Scope` will be
ignored, and `local` is used. `Scope` allows cluster managers to handle the
volume in different ways. For instance, a scope of `global` signals to the
cluster manager that it only needs to create the volume once instead of on each
Docker host. More capabilities may be added in the future.

docs/reference/builder.md: file diff suppressed because it is too large.
---
title: "attach"
description: "The attach command description and usage"
keywords: "attach, running, container"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# attach
```markdown
Usage: docker attach [OPTIONS] CONTAINER
Attach local standard input, output, and error streams to a running container
Options:
--detach-keys string Override the key sequence for detaching a container
--help Print usage
--no-stdin Do not attach STDIN
--sig-proxy Proxy all received signals to the process (default true)
```
## Description
Use `docker attach` to attach your terminal's standard input, output, and error
(or any combination of the three) to a running container using the container's
ID or name. This allows you to view its ongoing output or to control it
interactively, as though the commands were running directly in your terminal.
> **Note:**
> The `attach` command will display the output of the `ENTRYPOINT/CMD` process. This
> can appear as if the attach command is hung when in fact the process may simply
> not be interacting with the terminal at that time.
You can attach to the same contained process multiple times simultaneously,
even as a different user with the appropriate permissions.
To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the
container. If `--sig-proxy` is true (the default), `CTRL-c` sends a `SIGINT` to
the container. You can detach from a container and leave it running using the
`CTRL-p CTRL-q` key sequence.
> **Note:**
> A process running as PID 1 inside a container is treated specially by
> Linux: it ignores any signal with the default action. So, the process
> will not terminate on `SIGINT` or `SIGTERM` unless it is coded to do
> so.
It is forbidden to redirect the standard input of a `docker attach` command
while attaching to a tty-enabled container (i.e.: launched with `-t`).
While a client is connected to a container's stdio using `docker attach`, Docker
uses a ~1MB memory buffer to maximize the throughput of the application. If
this buffer is filled, the speed of the API connection will start to have an
effect on the process output writing speed. This is similar to other
applications like SSH. Because of this, it is not recommended to run
performance critical applications that generate a lot of output in the
foreground over a slow client connection. Instead, users should use the
`docker logs` command to get access to the logs.
### Override the detach sequence
If you want, you can configure an override for the Docker key sequence for detach.
This is useful if the Docker default sequence conflicts with a key sequence you
use for other applications. There are two ways to define your own detach key
sequence: as a per-container override or as a configuration property in your
entire configuration.
To override the sequence for an individual container, use the
`--detach-keys="<sequence>"` flag with the `docker attach` command. The format of
the `<sequence>` is either a letter [a-Z], or `ctrl-` combined with any of
the following:
* `a-z` (a single lowercase alpha character )
* `@` (at sign)
* `[` (left bracket)
* `\\` (two backward slashes)
* `_` (underscore)
* `^` (caret)
The values `a`, `ctrl-a`, `X`, and `ctrl-\\` are all examples of valid key
sequences. To configure a different default key sequence for all
containers, see the [**Configuration file** section](cli.md#configuration-files).
## Examples
### Attach to and detach from a running container
```bash
$ docker run -d --name topdemo ubuntu /usr/bin/top -b
$ docker attach topdemo
top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05
Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Mem: 373572k total, 355560k used, 18012k free, 27872k buffers
Swap: 786428k total, 0k used, 786428k free, 221740k cached
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top
top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05
Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Mem: 373572k total, 355244k used, 18328k free, 27872k buffers
Swap: 786428k total, 0k used, 786428k free, 221776k cached
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top
top - 02:05:58 up 3:06, 0 users, load average: 0.01, 0.02, 0.05
Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
Cpu(s): 0.2%us, 0.3%sy, 0.0%ni, 99.5%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Mem: 373572k total, 355780k used, 17792k free, 27880k buffers
Swap: 786428k total, 0k used, 786428k free, 221776k cached
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top
^C$
$ echo $?
0
$ docker ps -a | grep topdemo
7998ac8581f9 ubuntu:14.04 "/usr/bin/top -b" 38 seconds ago Exited (0) 21 seconds ago topdemo
```
### Get the exit code of the container's command
In this second example, you can see that the exit code returned by the `bash`
process is also returned by the `docker attach` command to its caller:
```bash
$ docker run --name test -d -it debian
275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab
$ docker attach test
root@f38c87f2a42d:/# exit 13
exit
$ echo $?
13
$ docker ps -a | grep test
275c44472aeb debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test
```

---
title: "build"
description: "The build command description and usage"
keywords: "build, docker, image"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# build
```markdown
Usage: docker build [OPTIONS] PATH | URL | -
Build an image from a Dockerfile
Options:
--add-host value Add a custom host-to-IP mapping (host:ip) (default [])
--build-arg value Set build-time variables (default [])
--cache-from value Images to consider as cache sources (default [])
--cgroup-parent string Optional parent cgroup for the container
--compress Compress the build context using gzip
--cpu-period int Limit the CPU CFS (Completely Fair Scheduler) period
--cpu-quota int Limit the CPU CFS (Completely Fair Scheduler) quota
-c, --cpu-shares int CPU shares (relative weight)
--cpuset-cpus string CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems string MEMs in which to allow execution (0-3, 0,1)
--disable-content-trust Skip image verification (default true)
-f, --file string Name of the Dockerfile (Default is 'PATH/Dockerfile')
--force-rm Always remove intermediate containers
--help Print usage
--iidfile string Write the image ID to the file
--isolation string Container isolation technology
--label value Set metadata for an image (default [])
-m, --memory string Memory limit
--memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--network string Set the networking mode for the RUN instructions during build
'bridge': use default Docker bridge
'none': no networking
'container:<name|id>': reuse another container's network stack
'host': use the Docker host network stack
'<network-name>|<network-id>': connect to a user-defined network
--no-cache Do not use cache when building the image
--pull Always attempt to pull a newer version of the image
-q, --quiet Suppress the build output and print image ID on success
--rm Remove intermediate containers after a successful build (default true)
--security-opt value Security Options (default [])
--shm-size bytes Size of /dev/shm
The format is `<number><unit>`. `number` must be greater than `0`.
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes),
or `g` (gigabytes). If you omit the unit, the system uses bytes.
--squash Squash newly built layers into a single new layer (**Experimental Only**)
-t, --tag value Name and optionally a tag in the 'name:tag' format (default [])
--target string Set the target build stage to build.
--ulimit value Ulimit options (default [])
```
## Description
Builds Docker images from a Dockerfile and a "context". A build's context is
the files located in the specified `PATH` or `URL`. The build process can refer
to any of the files in the context. For example, your build can use an
[*ADD*](../builder.md#add) instruction to reference a file in the
context.
The `URL` parameter can refer to three kinds of resources: Git repositories,
pre-packaged tarball contexts and plain text files.
### Git repositories
When the `URL` parameter points to the location of a Git repository, the
repository acts as the build context. The system recursively fetches the
repository and its submodules. The commit history is not preserved. A
repository is first pulled into a temporary directory on your local host. After
that succeeds, the directory is sent to the Docker daemon as the context.
Using a local copy gives you the ability to access private repositories using local
user credentials, VPNs, and so forth.
> **Note:**
> If the `URL` parameter contains a fragment the system will recursively clone
> the repository and its submodules using a `git clone --recursive` command.
Git URLs accept context configuration in their fragment section, separated by a
colon `:`. The first part represents the reference that Git will check out,
which can be either a branch, a tag, or a remote reference. The second part
represents a subdirectory inside the repository that will be used as a build
context.
For example, run this command to use a directory called `docker` in the branch
`container`:
```bash
$ docker build https://github.com/docker/rootfs.git#container:docker
```
The following table represents all the valid suffixes with their build
contexts:
Build Syntax Suffix | Commit Used | Build Context Used
--------------------------------|-----------------------|-------------------
`myrepo.git` | `refs/heads/master` | `/`
`myrepo.git#mytag` | `refs/tags/mytag` | `/`
`myrepo.git#mybranch` | `refs/heads/mybranch` | `/`
`myrepo.git#pull/42/head` | `refs/pull/42/head` | `/`
`myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder`
`myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder`
`myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder`
`myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder`
### Tarball contexts
If you pass a URL to a remote tarball, the URL itself is sent to the daemon:
```bash
$ docker build http://server/context.tar.gz
```
The download operation will be performed on the host the Docker daemon is
running on, which is not necessarily the same host from which the build command
is being issued. The Docker daemon will fetch `context.tar.gz` and use it as the
build context. Tarball contexts must be tar archives conforming to the standard
`tar` UNIX format and can be compressed with any one of the 'xz', 'bzip2',
'gzip' or 'identity' (no compression) formats.
### Text files
Instead of specifying a context, you can pass a single `Dockerfile` in the
`URL` or pipe the file in via `STDIN`. To pipe a `Dockerfile` from `STDIN`:
```bash
$ docker build - < Dockerfile
```
With Powershell on Windows, you can run:
```powershell
Get-Content Dockerfile | docker build -
```
If you use `STDIN` or specify a `URL` pointing to a plain text file, the system
places the contents into a file called `Dockerfile`, and any `-f`, `--file`
option is ignored. In this scenario, there is no context.
By default the `docker build` command will look for a `Dockerfile` at the root
of the build context. The `-f`, `--file`, option lets you specify the path to
an alternative file to use instead. This is useful in cases where the same set
of files are used for multiple builds. The path must be to a file within the
build context. If a relative path is specified then it is interpreted as
relative to the root of the context.
In most cases, it's best to put each Dockerfile in an empty directory. Then,
add to that directory only the files needed for building the Dockerfile. To
increase the build's performance, you can exclude files and directories by
adding a `.dockerignore` file to that directory as well. For information on
creating one, see the [.dockerignore file](../builder.md#dockerignore-file).
If the Docker client loses connection to the daemon, the build is canceled.
This happens if you interrupt the Docker client with `CTRL-c` or if the Docker
client is killed for any reason. If the build initiated a pull which is still
running at the time the build is cancelled, the pull is cancelled as well.
## Return code
On a successful build, a return code of success `0` will be returned. When the
build fails, a non-zero failure code will be returned.
There should be informational output of the reason for failure output to
`STDERR`:
```bash
$ docker build -t fail .
Sending build context to Docker daemon 2.048 kB
Sending build context to Docker daemon
Step 1/3 : FROM busybox
---> 4986bf8c1536
Step 2/3 : RUN exit 13
---> Running in e26670ec7a0a
INFO[0000] The command [/bin/sh -c exit 13] returned a non-zero code: 13
$ echo $?
1
```
See also:
[*Dockerfile Reference*](../builder.md).
## Examples
### Build with PATH
```bash
$ docker build .
Uploading context 10240 bytes
Step 1/3 : FROM busybox
Pulling repository busybox
---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/
Step 2/3 : RUN ls -lh /
---> Running in 9c9e81692ae9
total 24
drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin
drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev
drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc
drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib
lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib
dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc
lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin
dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys
drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp
drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr
---> b35f4035db3f
Step 3/3 : CMD echo Hello world
---> Running in 02071fceb21b
---> f52f38b7823e
Successfully built f52f38b7823e
Removing intermediate container 9c9e81692ae9
Removing intermediate container 02071fceb21b
```
This example specifies that the `PATH` is `.`, and so all the files in the
local directory get `tar`d and sent to the Docker daemon. The `PATH` specifies
where to find the files for the "context" of the build on the Docker daemon.
Remember that the daemon could be running on a remote machine and that no
parsing of the Dockerfile happens at the client side (where you're running
`docker build`). That means that *all* the files at `PATH` get sent, not just
the ones listed to [*ADD*](../builder.md#add) in the Dockerfile.
The transfer of context from the local machine to the Docker daemon is what the
`docker` client means when you see the "Sending build context" message.
If you wish to keep the intermediate containers after the build is complete,
you must use `--rm=false`. This does not affect the build cache.
### Build with URL
```bash
$ docker build github.com/creack/docker-firefox
```
This will clone the GitHub repository and use the cloned repository as context.
The Dockerfile at the root of the repository is used as the Dockerfile. You can
specify an arbitrary Git repository by using the `git://` or `git@` scheme.
```bash
$ docker build -f ctx/Dockerfile http://server/ctx.tar.gz
Downloading context: http://server/ctx.tar.gz [===================>] 240 B/240 B
Step 1/3 : FROM busybox
---> 8c2e06607696
Step 2/3 : ADD ctx/container.cfg /
---> e7829950cee3
Removing intermediate container b35224abf821
Step 3/3 : CMD /bin/ls
---> Running in fbc63d321d73
---> 3286931702ad
Removing intermediate container fbc63d321d73
Successfully built 377c409b35e4
```
This sends the URL `http://server/ctx.tar.gz` to the Docker daemon, which
downloads and extracts the referenced tarball. The `-f ctx/Dockerfile`
parameter specifies a path inside `ctx.tar.gz` to the `Dockerfile` that is used
to build the image. Any `ADD` commands in that `Dockerfile` that refer to local
paths must be relative to the root of the contents inside `ctx.tar.gz`. In the
example above, the tarball contains a directory `ctx/`, so the `ADD
ctx/container.cfg /` operation works as expected.
### Build with -
```bash
$ docker build - < Dockerfile
```
This will read a Dockerfile from `STDIN` without context. Due to the lack of a
context, no contents of any local directory will be sent to the Docker daemon.
Since there is no context, a Dockerfile `ADD` only works if it refers to a
remote URL.
```bash
$ docker build - < context.tar.gz
```
This will build an image for a compressed context read from `STDIN`. Supported
formats are: bzip2, gzip and xz.
### Use a .dockerignore file
```bash
$ docker build .
Uploading context 18.829 MB
Uploading context
Step 1/2 : FROM busybox
---> 769b9341d937
Step 2/2 : CMD echo Hello world
---> Using cache
---> 99cc1ad10469
Successfully built 99cc1ad10469
$ echo ".git" > .dockerignore
$ docker build .
Uploading context 6.76 MB
Uploading context
Step 1/2 : FROM busybox
---> 769b9341d937
Step 2/2 : CMD echo Hello world
---> Using cache
---> 99cc1ad10469
Successfully built 99cc1ad10469
```
This example shows the use of the `.dockerignore` file to exclude the `.git`
directory from the context. Its effect can be seen in the changed size of the
uploaded context. The builder reference contains detailed information on
[creating a .dockerignore file](../builder.md#dockerignore-file).
### Tag an image (-t)
```bash
$ docker build -t vieux/apache:2.0 .
```
This will build like the previous example, but it will then tag the resulting
image. The repository name will be `vieux/apache` and the tag will be `2.0`.
[Read more about valid tags](tag.md).
You can apply multiple tags to an image. For example, you can apply the `latest`
tag to a newly built image and add another tag that references a specific
version.
For example, to tag an image both as `whenry/fedora-jboss:latest` and
`whenry/fedora-jboss:v2.1`, use the following:
```bash
$ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 .
```
### Specify a Dockerfile (-f)
```bash
$ docker build -f Dockerfile.debug .
```
This will use a file called `Dockerfile.debug` for the build instructions
instead of `Dockerfile`.
```bash
$ curl example.com/remote/Dockerfile | docker build -f - .
```
The above command will use the current directory as the build context and read
a Dockerfile from stdin.
```bash
$ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug .
$ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod .
```
The above commands will build the current build context (as specified by the
`.`) twice, once using a debug version of a `Dockerfile` and once using a
production version.
```bash
$ cd /home/me/myapp/some/dir/really/deep
$ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp
$ docker build -f ../../../../dockerfiles/debug /home/me/myapp
```
These two `docker build` commands do the exact same thing. They both use the
contents of the `debug` file instead of looking for a `Dockerfile` and will use
`/home/me/myapp` as the root of the build context. Note that `debug` is in the
directory structure of the build context, regardless of how you refer to it on
the command line.
> **Note:**
> `docker build` will return a `no such file or directory` error if the
> file or directory does not exist in the uploaded context. This may
> happen if there is no context, or if you specify a file that is
> elsewhere on the Host system. The context is limited to the current
> directory (and its children) for security reasons, and to ensure
> repeatable builds on remote Docker hosts. This is also the reason why
> `ADD ../file` will not work.
### Use a custom parent cgroup (--cgroup-parent)
When `docker build` is run with the `--cgroup-parent` option the containers
used in the build will be run with the [corresponding `docker run`
flag](../run.md#specifying-custom-cgroups).
### Set ulimits in container (--ulimit)
Using the `--ulimit` option with `docker build` will cause each build step's
container to be started using those [`--ulimit`
flag values](./run.md#set-ulimits-in-container-ulimit).
### Set build-time variables (--build-arg)
You can use `ENV` instructions in a Dockerfile to define variable
values. These values persist in the built image. However, often
persistence is not what you want. Users want to specify variables differently
depending on which host they build an image on.
A good example is `http_proxy` or source versions for pulling intermediate
files. The `ARG` instruction lets Dockerfile authors define values that users
can set at build-time using the `--build-arg` flag:
```bash
$ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 .
```
This flag allows you to pass the build-time variables that are
accessed like regular environment variables in the `RUN` instruction of the
Dockerfile. Also, these values don't persist in the intermediate or final images
like `ENV` values do.
Using this flag will not alter the output you see when the `ARG` lines from the
Dockerfile are echoed during the build process.
For detailed information on using `ARG` and `ENV` instructions, see the
[Dockerfile reference](../builder.md).
### Optional security options (--security-opt)
This flag is only supported on a daemon running on Windows, and only supports
the `credentialspec` option. The `credentialspec` must be in the format
`file://spec.txt` or `registry://keyname`.
### Specify isolation technology for container (--isolation)
This option is useful in situations where you are running Docker containers on
Windows. The `--isolation=<value>` option sets a container's isolation
technology. On Linux, the only supported value is `default`, which uses
Linux namespaces. On Microsoft Windows, you can specify these values:
| Value | Description |
|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `default` | Use the value specified by the Docker daemon's `--exec-opt`. If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. |
| `process` | Namespace isolation only. |
| `hyperv` | Hyper-V hypervisor partition-based isolation. |
Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`.
### Add entries to container hosts file (--add-host)
You can add other hosts into a container's `/etc/hosts` file by using one or
more `--add-host` flags. This example adds a static address for a host named
`docker`:

    $ docker build --add-host=docker:10.180.0.1 .
### Specifying target build stage (--target)
When building a Dockerfile with multiple build stages, `--target` can be used to
specify an intermediate build stage by name as a final stage for the resulting
image. Commands after the target stage will be skipped.
```Dockerfile
FROM debian AS build-env
...
FROM alpine AS production-env
...
```
```bash
$ docker build -t mybuildimage --target build-env .
```
### Squash an image's layers (--squash) **Experimental Only**
#### Overview
Once the image is built, squash the new layers into a new image with a single
new layer. Squashing does not destroy any existing image; rather, it creates a new
image with the content of the squashed layers. This effectively makes it look
like all `Dockerfile` commands were created with a single layer. The build
cache is preserved with this method.
**Note**: Using this option means the new image will not be able to take
advantage of layer sharing with other images and may use significantly more
space.
**Note**: Using this option you may see significantly more space used due to
storing two copies of the image, one for the build cache with all the cache
layers intact, and one for the squashed version.
#### Prerequisites
The example on this page is using experimental mode in Docker 1.13.
Experimental mode can be enabled by using the `--experimental` flag when starting the Docker daemon or setting `experimental: true` in the `daemon.json` configuration file.
By default, experimental mode is disabled. To see the current configuration, use the `docker version` command.
```none
Server:
Version: 1.13.1
API version: 1.26 (minimum version 1.12)
Go version: go1.7.5
Git commit: 092cba3
Built: Wed Feb 8 06:35:24 2017
OS/Arch: linux/amd64
Experimental: false
[...]
```
To enable experimental mode, users need to restart the docker daemon with the experimental flag enabled.
#### Enable Docker experimental
Experimental features are now included in the standard Docker binaries as of version 1.13.0. To enable experimental features, start the Docker daemon with the `--experimental` flag. You can also enable the daemon flag via `/etc/docker/daemon.json`, for example:
```json
{
"experimental": true
}
```
Then make sure the experimental flag is enabled:
```bash
$ docker version -f '{{.Server.Experimental}}'
true
```
#### Build an image with `--squash` argument
The following is an example of `docker build` with the `--squash` argument:
```Dockerfile
FROM busybox
RUN echo hello > /hello
RUN echo world >> /hello
RUN touch remove_me /remove_me
ENV HELLO world
RUN rm /remove_me
```
An image named `test` is built with the `--squash` argument.
```bash
$ docker build --squash -t test .
[...]
```
If everything is right, the history will look like this:
```bash
$ docker history test
IMAGE CREATED CREATED BY SIZE COMMENT
4e10cb5b4cac 3 seconds ago 12 B merge sha256:88a7b0112a41826885df0e7072698006ee8f621c6ab99fca7fe9151d7b599702 to sha256:47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb
<missing> 5 minutes ago /bin/sh -c rm /remove_me 0 B
<missing> 5 minutes ago /bin/sh -c #(nop) ENV HELLO=world 0 B
<missing> 5 minutes ago /bin/sh -c touch remove_me /remove_me 0 B
<missing> 5 minutes ago /bin/sh -c echo world >> /hello 0 B
<missing> 6 minutes ago /bin/sh -c echo hello > /hello 0 B
<missing> 7 weeks ago /bin/sh -c #(nop) CMD ["sh"] 0 B
<missing> 7 weeks ago /bin/sh -c #(nop) ADD file:47ca6e777c36a4cfff 1.113 MB
```
Notice that the names of all of the squashed layers are `<missing>`, and that there is a new layer with the comment `merge`.
Test the image: check that `/remove_me` is gone, that `/hello` contains `hello\nworld`, and that the value of the `HELLO` environment variable is `world`.

---
title: "Use the Docker command line"
description: "Docker's CLI command description and usage"
keywords: "Docker, Docker documentation, CLI, command line"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# docker
To list available commands, either run `docker` with no parameters
or execute `docker help`:
```bash
$ docker
Usage: docker [OPTIONS] COMMAND [ARG...]
docker [ --help | -v | --version ]
A self-sufficient runtime for containers.
Options:
--config string Location of client config files (default "/root/.docker")
-D, --debug Enable debug mode
--help Print usage
-H, --host value Daemon socket(s) to connect to (default [])
-l, --log-level string Set the logging level ("debug"|"info"|"warn"|"error"|"fatal") (default "info")
--tls Use TLS; implied by --tlsverify
--tlscacert string Trust certs signed only by this CA (default "/root/.docker/ca.pem")
--tlscert string Path to TLS certificate file (default "/root/.docker/cert.pem")
--tlskey string Path to TLS key file (default "/root/.docker/key.pem")
--tlsverify Use TLS and verify the remote
-v, --version Print version information and quit
Commands:
attach Attach to a running container
# […]
```
## Description
Depending on your Docker system configuration, you may be required to preface
each `docker` command with `sudo`. To avoid having to use `sudo` with the
`docker` command, your system administrator can create a Unix group called
`docker` and add users to it.
For more information about installing Docker or `sudo` configuration, refer to
the [installation](https://docs.docker.com/engine/installation/) instructions for your operating system.
### Environment variables
For easy reference, the following list of environment variables is supported
by the `docker` command line:
* `DOCKER_API_VERSION` The API version to use (e.g. `1.19`)
* `DOCKER_CONFIG` The location of your client configuration files.
* `DOCKER_CERT_PATH` The location of your authentication keys.
* `DOCKER_DRIVER` The graph driver to use.
* `DOCKER_HOST` Daemon socket to connect to.
* `DOCKER_NOWARN_KERNEL_VERSION` Prevent warnings that your Linux kernel is
unsuitable for Docker.
* `DOCKER_RAMDISK` If set this will disable 'pivot_root'.
* `DOCKER_TLS_VERIFY` When set Docker uses TLS and verifies the remote.
* `DOCKER_CONTENT_TRUST` When set Docker uses notary to sign and verify images.
Equates to `--disable-content-trust=false` for build, create, pull, push, run.
* `DOCKER_CONTENT_TRUST_SERVER` The URL of the Notary server to use. This defaults
to the same URL as the registry.
* `DOCKER_HIDE_LEGACY_COMMANDS` When set, Docker hides "legacy" top-level commands (such as `docker rm`, and
`docker pull`) in `docker help` output, and only `Management commands` per object-type (e.g., `docker container`) are
printed. This may become the default in a future release, at which point this environment-variable is removed.
* `DOCKER_TMPDIR` Location for temporary Docker files.
Because Docker is developed using Go, you can also use any environment
variables used by the Go runtime. In particular, you may find these useful:
* `HTTP_PROXY`
* `HTTPS_PROXY`
* `NO_PROXY`
These Go environment variables are case-insensitive. See the
[Go specification](http://golang.org/pkg/net/http/) for details on these
variables.
### Configuration files
By default, the Docker command line stores its configuration files in a
directory called `.docker` within your `$HOME` directory. However, you can
specify a different location via the `DOCKER_CONFIG` environment variable
or the `--config` command line option. If both are specified, then the
`--config` option overrides the `DOCKER_CONFIG` environment variable.
For example:

    docker --config ~/testconfigs/ ps

Instructs Docker to use the configuration files in your `~/testconfigs/`
directory when running the `ps` command.
Docker manages most of the files in the configuration directory
and you should not modify them. However, you *can modify* the
`config.json` file to control certain aspects of how the `docker`
command behaves.
Currently, you can modify the `docker` command behavior using environment
variables or command-line options. You can also use options within
`config.json` to modify some of the same behavior. When using these
mechanisms, you must keep in mind the order of precedence among them. Command
line options override environment variables and environment variables override
properties you specify in a `config.json` file.
The `config.json` file stores a JSON encoding of several properties:
The property `HttpHeaders` specifies a set of headers to include in all messages
sent from the Docker client to the daemon. Docker does not try to interpret or
understand these headers; it simply puts them into the messages. Docker does
not allow these headers to change any headers it sets for itself.

The property `psFormat` specifies the default format for `docker ps` output.
When the `--format` flag is not provided with the `docker ps` command,
Docker's client uses this property. If this property is not set, the client
falls back to the default table format. For a list of supported formatting
directives, see the
[**Formatting** section in the `docker ps` documentation](ps.md).

The property `imagesFormat` specifies the default format for `docker images` output.
When the `--format` flag is not provided with the `docker images` command,
Docker's client uses this property. If this property is not set, the client
falls back to the default table format. For a list of supported formatting
directives, see the [**Formatting** section in the `docker images` documentation](images.md).

The property `pluginsFormat` specifies the default format for `docker plugin ls` output.
When the `--format` flag is not provided with the `docker plugin ls` command,
Docker's client uses this property. If this property is not set, the client
falls back to the default table format. For a list of supported formatting
directives, see the [**Formatting** section in the `docker plugin ls` documentation](plugin_ls.md).

The property `servicesFormat` specifies the default format for `docker
service ls` output. When the `--format` flag is not provided with the
`docker service ls` command, Docker's client uses this property. If this
property is not set, the client falls back to the default json format. For a
list of supported formatting directives, see the
[**Formatting** section in the `docker service ls` documentation](service_ls.md).

The property `serviceInspectFormat` specifies the default format for `docker
service inspect` output. When the `--format` flag is not provided with the
`docker service inspect` command, Docker's client uses this property. If this
property is not set, the client falls back to the default json format. For a
list of supported formatting directives, see the
[**Formatting** section in the `docker service inspect` documentation](service_inspect.md).

The property `statsFormat` specifies the default format for `docker
stats` output. When the `--format` flag is not provided with the
`docker stats` command, Docker's client uses this property. If this
property is not set, the client falls back to the default table
format. For a list of supported formatting directives, see the
[**Formatting** section in the `docker stats` documentation](stats.md).

The property `secretFormat` specifies the default format for `docker
secret ls` output. When the `--format` flag is not provided with the
`docker secret ls` command, Docker's client uses this property. If this
property is not set, the client falls back to the default table
format. For a list of supported formatting directives, see the
[**Formatting** section in the `docker secret ls` documentation](secret_ls.md).

The property `nodesFormat` specifies the default format for `docker node ls` output.
When the `--format` flag is not provided with the `docker node ls` command,
Docker's client uses the value of `nodesFormat`. If the value of `nodesFormat` is not set,
the client uses the default table format. For a list of supported formatting
directives, see the [**Formatting** section in the `docker node ls` documentation](node_ls.md).

The property `configFormat` specifies the default format for `docker
config ls` output. When the `--format` flag is not provided with the
`docker config ls` command, Docker's client uses this property. If this
property is not set, the client falls back to the default table
format. For a list of supported formatting directives, see the
[**Formatting** section in the `docker config ls` documentation](config_ls.md).

The property `credsStore` specifies an external binary to serve as the default
credential store. When this property is set, `docker login` will attempt to
store credentials in the binary specified by `docker-credential-<value>`, which
is visible on `$PATH`. If this property is not set, credentials will be stored
in the `auths` property of the config. For more information, see the
[**Credentials store** section in the `docker login` documentation](login.md#credentials-store).

The property `credHelpers` specifies a set of credential helpers to use
preferentially over `credsStore` or `auths` when storing and retrieving
credentials for specific registries. If this property is set, the binary
`docker-credential-<value>` will be used when storing or retrieving credentials
for a specific registry. For more information, see the
[**Credential helpers** section in the `docker login` documentation](login.md#credential-helpers).

Once attached to a container, users detach from it and leave it running using
the `CTRL-p CTRL-q` key sequence. This detach key sequence is customizable
using the `detachKeys` property. Specify a `<sequence>` value for the
property. The format of the `<sequence>` is a comma-separated list of either
a letter [a-Z], or `ctrl-` combined with any of the following:

* `a-z` (a single lowercase alpha character)
* `@` (at sign)
* `[` (left bracket)
* `\\` (two backward slashes)
* `_` (underscore)
* `^` (caret)

Your customization applies to all containers started with your Docker client.
Users can override your custom or the default key sequence on a per-container
basis. To do this, the user specifies the `--detach-keys` flag with the `docker
attach`, `docker exec`, `docker run` or `docker start` command.
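
For example, a user could override the configured or default sequence for a
single interactive session; the key sequence and image below are illustrative:

```bash
# detach from this particular container with ctrl-x,x instead
$ docker run -it --detach-keys="ctrl-x,x" ubuntu bash
```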
Following is a sample `config.json` file:
```json
{
"HttpHeaders": {
"MyHeader": "MyValue"
},
"psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}",
"imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}",
"pluginsFormat": "table {{.ID}}\t{{.Name}}\t{{.Enabled}}",
"statsFormat": "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}",
"servicesFormat": "table {{.ID}}\t{{.Name}}\t{{.Mode}}",
"secretFormat": "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}",
"configFormat": "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}",
"serviceInspectFormat": "pretty",
"nodesFormat": "table {{.ID}}\t{{.Hostname}}\t{{.Availability}}",
"detachKeys": "ctrl-e,e",
"credsStore": "secretservice",
"credHelpers": {
"awesomereg.example.org": "hip-star",
"unicorn.example.com": "vcbait"
}
}
```
### Notary
If you use your own notary server with a self-signed certificate or an internal
Certificate Authority, you need to place the certificate at
`tls/<registry_url>/ca.crt` in your docker config directory.
Alternatively you can trust the certificate globally by adding it to your system's
list of root Certificate Authorities.
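
A minimal sketch of the first approach, assuming the default `~/.docker`
config directory and a hypothetical registry host `registry.example.com`:

```bash
# place the self-signed CA where the Docker client expects it
$ mkdir -p ~/.docker/tls/registry.example.com
$ cp ca.crt ~/.docker/tls/registry.example.com/ca.crt
```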
## Examples
### Display help text
To list the help on any command, just execute the command followed by the
`--help` option.
```bash
$ docker run --help

Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]

Run a command in a new container

Options:
      --add-host value             Add a custom host-to-IP mapping (host:ip) (default [])
  -a, --attach value               Attach to STDIN, STDOUT or STDERR (default [])
...
```
### Option types
Single character command line options can be combined, so rather than
typing `docker run -i -t --name test busybox sh`,
you can write `docker run -it --name test busybox sh`.
#### Boolean
Boolean options take the form `-d=false`. The value you see in the help text is
the default value which is set if you do **not** specify that flag. If you
specify a Boolean flag without a value, this will set the flag to `true`,
irrespective of the default value.
For example, running `docker run -d` will set the value to `true`, so your
container **will** run in "detached" mode, in the background.
Options which default to `true` (e.g., `docker build --rm=true`) can only be
set to the non-default value by explicitly setting them to `false`:
```bash
$ docker build --rm=false .
```
#### Multi
You can specify options like `-a=[]` multiple times in a single command line,
for example in these commands:
```bash
$ docker run -a stdin -a stdout -i -t ubuntu /bin/bash
$ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls
```
Sometimes, multiple options can call for a more complex value string as for
`-v`:
```bash
$ docker run -v /host:/container example/mysql
```
> **Note**: Do not use the `-t` and `-a stderr` options together due to
> limitations in the `pty` implementation. All `stderr` in `pty` mode
> simply goes to `stdout`.
#### Strings and Integers
Options like `--name=""` expect a string, and they
can only be specified once. Options like `-c=0`
expect an integer, and they can only be specified once.
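
A minimal illustration, using arbitrary values and the `busybox` image:

```bash
# --name takes a string, -c (CPU shares) takes an integer; each appears once
$ docker run --name option-demo -c 512 busybox true
```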

---
title: "commit"
description: "The commit command description and usage"
keywords: "commit, file, changes"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# commit
```markdown
Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
Create a new image from a container's changes
Options:
-a, --author string Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")
-c, --change value Apply Dockerfile instruction to the created image (default [])
--help Print usage
-m, --message string Commit message
-p, --pause Pause container during commit (default true)
```
## Description
It can be useful to commit a container's file changes or settings into a new
image. This allows you to debug a container by running an interactive shell, or to
export a working dataset to another server. Generally, it is better to use
Dockerfiles to manage your images in a documented and maintainable way.
[Read more about valid image names and tags](tag.md).
The commit operation will not include any data contained in
volumes mounted inside the container.
By default, the container being committed and its processes will be paused
while the image is committed. This reduces the likelihood of encountering data
corruption during the process of creating the commit. If this behavior is
undesired, set the `--pause` option to false.
The `--change` option will apply `Dockerfile` instructions to the image that is
created. Supported `Dockerfile` instructions:
`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
## Examples
### Commit a container
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky
197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton
$ docker commit c3f279d17e0a svendowideit/testimage:version3
f5283438590d
$ docker images
REPOSITORY TAG ID CREATED SIZE
svendowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB
```
### Commit a container with new configurations
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky
197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton
$ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a
[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin]
$ docker commit --change "ENV DEBUG true" c3f279d17e0a svendowideit/testimage:version3
f5283438590d
$ docker inspect -f "{{ .Config.Env }}" f5283438590d
[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true]
```
### Commit a container with new `CMD` and `EXPOSE` instructions
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky
197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton
$ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a svendowideit/testimage:version4
f5283438590d
$ docker run -d svendowideit/testimage:version4
89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
89373736e2e7 testimage:version4 "apachectl -DFOREGROU" 3 seconds ago Up 2 seconds 80/tcp distracted_fermat
c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky
197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton
```

---
title: "container"
description: "The container command description and usage"
keywords: "container"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# container
```markdown
Usage: docker container COMMAND
Manage containers
Options:
--help Print usage
Commands:
attach Attach to a running container
commit Create a new image from a container's changes
cp Copy files/folders between a container and the local filesystem
create Create a new container
diff Inspect changes to files or directories on a container's filesystem
exec Run a command in a running container
export Export a container's filesystem as a tar archive
inspect Display detailed information on one or more containers
kill Kill one or more running containers
logs Fetch the logs of a container
ls List containers
pause Pause all processes within one or more containers
port List port mappings or a specific mapping for the container
prune Remove all stopped containers
rename Rename a container
restart Restart one or more containers
rm Remove one or more containers
run Run a command in a new container
start Start one or more stopped containers
stats Display a live stream of container(s) resource usage statistics
stop Stop one or more running containers
top Display the running processes of a container
unpause Unpause all processes within one or more containers
update Update configuration of one or more containers
wait Block until one or more containers stop, then print their exit codes
Run 'docker container COMMAND --help' for more information on a command.
```
## Description
Manage containers.
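
For example, listing containers through the management command works the same
way as the familiar top-level shorthand (illustrative invocation):

```bash
# both commands list running containers
$ docker container ls
$ docker ps
```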

---
title: "container prune"
description: "Remove all stopped containers"
keywords: container, prune, delete, remove
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# container prune
```markdown
Usage: docker container prune [OPTIONS]
Remove all stopped containers
Options:
--filter filter Provide filter values (e.g. 'until=<timestamp>')
-f, --force Do not prompt for confirmation
--help Print usage
```
## Description
Removes all stopped containers.
## Examples
### Prune containers
```bash
$ docker container prune
WARNING! This will remove all stopped containers.
Are you sure you want to continue? [y/N] y
Deleted Containers:
4a7f7eebae0f63178aff7eb0aa39cd3f0627a203ab2df258c1a00b456cf20063
f98f9c2aa1eaf727e4ec9c0283bc7d4aa4762fbdba7f26191f26c97f64090360
Total reclaimed space: 212 B
```
### Filtering
The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more
than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
The currently supported filters are:
* until (`<timestamp>`) - only remove containers created before given timestamp
* label (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) - only remove containers with (or without, in case `label!=...` is used) the specified labels.
The `until` filter can be Unix timestamps, date formatted
timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
relative to the daemon machine's time. Supported formats for date
formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
timezone on the daemon will be used if you do not provide either a `Z` or a
`+-00:00` timezone offset at the end of the timestamp. When providing Unix
timestamps enter seconds[.nanoseconds], where seconds is the number of seconds
that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
fraction of a second no more than nine digits long.
The `label` filter accepts two formats. One is the `label=...` (`label=<key>` or `label=<key>=<value>`),
which removes containers with the specified labels. The other
format is the `label!=...` (`label!=<key>` or `label!=<key>=<value>`), which removes
containers without the specified labels.
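
For example, the following removes only stopped containers that carry a
hypothetical `env=test` label:

```bash
$ docker container prune --force --filter "label=env=test"
```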
The following removes containers created more than 5 minutes ago:
```bash
$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}'
CONTAINER ID IMAGE COMMAND CREATED AT STATUS
61b9efa71024 busybox "sh" 2017-01-04 13:23:33 -0800 PST Exited (0) 41 seconds ago
53a9bc23a516 busybox "sh" 2017-01-04 13:11:59 -0800 PST Exited (0) 12 minutes ago
$ docker container prune --force --filter "until=5m"
Deleted Containers:
53a9bc23a5168b6caa2bfbefddf1b30f93c7ad57f3dec271fd32707497cb9369
Total reclaimed space: 25 B
$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}'
CONTAINER ID IMAGE COMMAND CREATED AT STATUS
61b9efa71024 busybox "sh" 2017-01-04 13:23:33 -0800 PST Exited (0) 44 seconds ago
```
The following removes containers created before `2017-01-04T13:10:00`:
```bash
$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}'
CONTAINER ID IMAGE COMMAND CREATED AT STATUS
53a9bc23a516 busybox "sh" 2017-01-04 13:11:59 -0800 PST Exited (0) 7 minutes ago
4a75091a6d61 busybox "sh" 2017-01-04 13:09:53 -0800 PST Exited (0) 9 minutes ago
$ docker container prune --force --filter "until=2017-01-04T13:10:00"
Deleted Containers:
4a75091a6d618526fcd8b33ccd6e5928ca2a64415466f768a6180004b0c72c6c
Total reclaimed space: 27 B
$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}'
CONTAINER ID IMAGE COMMAND CREATED AT STATUS
53a9bc23a516 busybox "sh" 2017-01-04 13:11:59 -0800 PST Exited (0) 9 minutes ago
```
## Related commands
* [system df](system_df.md)
* [volume prune](volume_prune.md)
* [image prune](image_prune.md)
* [network prune](network_prune.md)
* [system prune](system_prune.md)

---
title: "cp"
description: "The cp command description and usage"
keywords: "copy, container, files, folders"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# cp
```markdown
Usage: docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|-
docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH
Copy files/folders between a container and the local filesystem
Use '-' as the source to read a tar archive from stdin
and extract it to a directory destination in a container.
Use '-' as the destination to stream a tar archive of a
container source to stdout.
Options:
-L, --follow-link Always follow symbol link in SRC_PATH
--help Print usage
```
## Description
The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`.
You can copy from the container's file system to the local machine or the
reverse, from the local filesystem to the container. If `-` is specified for
either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from
`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container.
The `SRC_PATH` or `DEST_PATH` can be a file or directory.
The `docker cp` command assumes container paths are relative to the container's
`/` (root) directory. This means supplying the initial forward slash is optional;
the command sees `compassionate_darwin:/tmp/foo/myfile.txt` and
`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can
be an absolute or relative value. The command interprets a local machine's
relative paths as relative to the current working directory where `docker cp` is
run.
The `cp` command behaves like the Unix `cp -a` command in that directories are
copied recursively with permissions preserved if possible. Ownership is set to
the user and primary group at the destination. For example, files copied to a
container are created with `UID:GID` of the root user. Files copied to the local
machine are created with the `UID:GID` of the user which invoked the `docker cp`
command. If you specify the `-L` option, `docker cp` follows any symbolic link
in the `SRC_PATH`. `docker cp` does *not* create parent directories for
`DEST_PATH` if they do not exist.
Assuming a path separator of `/`, a first argument of `SRC_PATH` and second
argument of `DEST_PATH`, the behavior is as follows:
- `SRC_PATH` specifies a file
- `DEST_PATH` does not exist
- the file is saved to a file created at `DEST_PATH`
- `DEST_PATH` does not exist and ends with `/`
- Error condition: the destination directory must exist.
- `DEST_PATH` exists and is a file
- the destination is overwritten with the source file's contents
- `DEST_PATH` exists and is a directory
- the file is copied into this directory using the basename from
`SRC_PATH`
- `SRC_PATH` specifies a directory
- `DEST_PATH` does not exist
- `DEST_PATH` is created as a directory and the *contents* of the source
directory are copied into this directory
- `DEST_PATH` exists and is a file
- Error condition: cannot copy a directory to a file
- `DEST_PATH` exists and is a directory
- `SRC_PATH` does not end with `/.` (that is: _slash_ followed by _dot_)
- the source directory is copied into this directory
- `SRC_PATH` does end with `/.` (that is: _slash_ followed by _dot_)
- the *content* of the source directory is copied into this
directory
The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above
rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not
the target, is copied by default. To copy the link target and not the link, specify
the `-L` option.
A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can
also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local
machine, for example `file:name.txt`. If you use a `:` in a local machine path,
you must be explicit with a relative or absolute path, for example:
`/path/to/file:name.txt` or `./file:name.txt`.
It is not possible to copy certain system files such as resources under
`/proc`, `/sys`, `/dev`, [tmpfs](run.md#mount-tmpfs-tmpfs), and mounts created by
the user in the container. However, you can still copy such files by manually
running `tar` in `docker exec`. Both of the following examples do the same thing
in different ways (assuming `SRC_PATH` and `DEST_PATH` are directories):
```bash
$ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH -
```
```bash
$ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH -
```
Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive.
The command extracts the content of the tar to the `DEST_PATH` in container's
filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as
the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`.
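
A couple of illustrative invocations of the forms described above; the
container name and paths are hypothetical:

```bash
# copy a local file into a directory inside a running container
$ docker cp ./config.yml mycontainer:/etc/app/

# stream a directory out of the container as a tar archive on STDOUT
$ docker cp mycontainer:/var/log - > logs.tar
```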

---
title: "create"
description: "The create command description and usage"
keywords: "docker, create, container"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# create
Creates a new container.
```markdown
Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...]
Create a new container
Options:
--add-host value Add a custom host-to-IP mapping (host:ip) (default [])
-a, --attach value Attach to STDIN, STDOUT or STDERR (default [])
--blkio-weight value Block IO (relative weight), between 10 and 1000
--blkio-weight-device value Block IO weight (relative device weight) (default [])
--cap-add value Add Linux capabilities (default [])
--cap-drop value Drop Linux capabilities (default [])
--cgroup-parent string Optional parent cgroup for the container
--cidfile string Write the container ID to the file
--cpu-count int The number of CPUs available for execution by the container.
Windows daemon only. On Windows Server containers, this is
approximated as a percentage of total CPU usage.
--cpu-percent int CPU percent (Windows only)
--cpu-period int Limit CPU CFS (Completely Fair Scheduler) period
--cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota
-c, --cpu-shares int CPU shares (relative weight)
--cpus NanoCPUs Number of CPUs (default 0.000)
--cpu-rt-period int Limit the CPU real-time period in microseconds
--cpu-rt-runtime int Limit the CPU real-time runtime in microseconds
--cpuset-cpus string CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems string MEMs in which to allow execution (0-3, 0,1)
--device value Add a host device to the container (default [])
--device-cgroup-rule value Add a rule to the cgroup allowed devices list
--device-read-bps value Limit read rate (bytes per second) from a device (default [])
--device-read-iops value Limit read rate (IO per second) from a device (default [])
--device-write-bps value Limit write rate (bytes per second) to a device (default [])
--device-write-iops value Limit write rate (IO per second) to a device (default [])
--disable-content-trust Skip image verification (default true)
--dns value Set custom DNS servers (default [])
--dns-option value Set DNS options (default [])
--dns-search value Set custom DNS search domains (default [])
--entrypoint string Overwrite the default ENTRYPOINT of the image
-e, --env value Set environment variables (default [])
--env-file value Read in a file of environment variables (default [])
--expose value Expose a port or a range of ports (default [])
--group-add value Add additional groups to join (default [])
--health-cmd string Command to run to check health
--health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s)
--health-retries int Consecutive failures needed to report unhealthy
--health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s)
--health-start-period duration Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h) (default 0s)
--help Print usage
-h, --hostname string Container host name
--init Run an init inside the container that forwards signals and reaps processes
-i, --interactive Keep STDIN open even if not attached
--io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only)
--io-maxiops uint Maximum IOps limit for the system drive (Windows only)
--ip string IPv4 address (e.g., 172.30.100.104)
--ip6 string IPv6 address (e.g., 2001:db8::33)
--ipc string IPC namespace to use
--isolation string Container isolation technology
--kernel-memory string Kernel memory limit
-l, --label value Set meta data on a container (default [])
--label-file value Read in a line delimited file of labels (default [])
--link value Add link to another container (default [])
--link-local-ip value Container IPv4/IPv6 link-local addresses (default [])
--log-driver string Logging driver for the container
--log-opt value Log driver options (default [])
--mac-address string Container MAC address (e.g., 92:d0:c6:0a:29:33)
-m, --memory string Memory limit
--memory-reservation string Memory soft limit
--memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--memory-swappiness int Tune container memory swappiness (0 to 100) (default -1)
      --mount value                Attach a filesystem mount to the container (default [])
--name string Assign a name to the container
--network-alias value Add network-scoped alias for the container (default [])
--network string Connect a container to a network (default "default")
'bridge': create a network stack on the default Docker bridge
'none': no networking
'container:<name|id>': reuse another container's network stack
'host': use the Docker host network stack
'<network-name>|<network-id>': connect to a user-defined network
--no-healthcheck Disable any container-specified HEALTHCHECK
--oom-kill-disable Disable OOM Killer
--oom-score-adj int Tune host's OOM preferences (-1000 to 1000)
--pid string PID namespace to use
--pids-limit int Tune container pids limit (set -1 for unlimited), kernel >= 4.3
--privileged Give extended privileges to this container
-p, --publish value Publish a container's port(s) to the host (default [])
-P, --publish-all Publish all exposed ports to random ports
--read-only Mount the container's root filesystem as read only
--restart string Restart policy to apply when a container exits (default "no")
Possible values are: no, on-failure[:max-retry], always, unless-stopped
--rm Automatically remove the container when it exits
--runtime string Runtime to use for this container
--security-opt value Security Options (default [])
--shm-size bytes Size of /dev/shm
The format is `<number><unit>`. `number` must be greater than `0`.
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes),
or `g` (gigabytes). If you omit the unit, the system uses bytes.
--stop-signal string Signal to stop a container (default "SIGTERM")
--stop-timeout=10 Timeout (in seconds) to stop a container
--storage-opt value Storage driver options for the container (default [])
--sysctl value Sysctl options (default map[])
--tmpfs value Mount a tmpfs directory (default [])
-t, --tty Allocate a pseudo-TTY
--ulimit value Ulimit options (default [])
-u, --user string Username or UID (format: <name|uid>[:<group|gid>])
--userns string User namespace to use
'host': Use the Docker host user namespace
'': Use the Docker daemon user namespace specified by `--userns-remap` option.
--uts string UTS namespace to use
-v, --volume value Bind mount a volume (default []). The format
is `[host-src:]container-dest[:<options>]`.
The comma-delimited `options` are [rw|ro],
[z|Z], [[r]shared|[r]slave|[r]private],
[delegated|cached|consistent], and
[nocopy]. The 'host-src' is an absolute path
or a name value.
--volume-driver string Optional volume driver for the container
--volumes-from value Mount volumes from the specified container(s) (default [])
-w, --workdir string Working directory inside the container
```
## Description
The `docker create` command creates a writeable container layer over the
specified image and prepares it for running the specified command. The
container ID is then printed to `STDOUT`. This is similar to `docker run -d`
except the container is never started. You can then use the
`docker start <container_id>` command to start the container at any point.
This is useful when you want to set up a container configuration ahead of time
so that it is ready to start when you need it. The initial status of the
new container is `created`.
Please see the [run command](run.md) section and the [Docker run reference](../run.md) for more details.
## Examples
### Create and start a container
```bash
$ docker create -t -i fedora bash
6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752
$ docker start -a -i 6d8af538ec5
bash-4.2#
```
### Initialize volumes
As of v1.4.0 container volumes are initialized during the `docker create` phase
(i.e., `docker run` too). For example, this allows you to `create` the `data`
volume container, and then use it from another container:
```bash
$ docker create -v /data --name data ubuntu
240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57
$ docker run --rm --volumes-from data ubuntu ls -la /data
total 8
drwxr-xr-x 2 root root 4096 Dec 5 04:10 .
drwxr-xr-x 48 root root 4096 Dec 5 04:11 ..
```
Similarly, `create` a host directory bind mounted volume container, which can
then be used from the subsequent container:
```bash
$ docker create -v /home/docker:/docker --name docker ubuntu
9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03
$ docker run --rm --volumes-from docker ubuntu ls -la /docker
total 20
drwxr-sr-x 5 1000 staff 180 Dec 5 04:00 .
drwxr-xr-x 48 root root 4096 Dec 5 04:13 ..
-rw-rw-r-- 1 1000 staff 3833 Dec 5 04:01 .ash_history
-rw-r--r-- 1 1000 staff 446 Nov 28 11:51 .ashrc
-rw-r--r-- 1 1000 staff 25 Dec 5 04:00 .gitconfig
drwxr-sr-x 3 1000 staff 60 Dec 1 03:28 .local
-rw-r--r-- 1 1000 staff 920 Nov 28 11:51 .profile
drwx--S--- 2 1000 staff 460 Dec 5 00:51 .ssh
drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker
```
### Set storage driver options per container
```bash
$ docker create -it --storage-opt size=120G fedora /bin/bash
```
This (size) allows you to set the container rootfs size to 120G at creation time.
This option is only available for the `devicemapper`, `btrfs`, `overlay2`,
`windowsfilter` and `zfs` graph drivers.
For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers,
you cannot pass a size less than the default BaseFS size.
For the `overlay2` storage driver, the size option is only available if the
backing filesystem is `xfs` and mounted with the `pquota` mount option.
Under these conditions, you can pass any size less than the backing filesystem size.
### Specify isolation technology for container (--isolation)
This option is useful in situations where you are running Docker containers on
Windows. The `--isolation=<value>` option sets a container's isolation
technology. On Linux, the only supported value is `default`, which uses
Linux namespaces. On Microsoft Windows, you can specify these values:
| Value     | Description |
|-----------|-------------|
| `default` | Use the value specified by the Docker daemon's `--exec-opt`. If the daemon does not specify an isolation technology, Microsoft Windows uses `process` as its default value if the daemon is running on Windows Server, or `hyperv` if running on Windows client. |
| `process` | Namespace isolation only. |
| `hyperv`  | Hyper-V hypervisor partition-based isolation. |
Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`.
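
For example, on a Windows daemon a container could be created with Hyper-V
isolation; the image name below is illustrative:

```bash
$ docker create --isolation=hyperv --name iis-demo microsoft/iis
```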
### Dealing with dynamically created devices (--device-cgroup-rule)
Devices available to a container are assigned at creation time. The
assigned devices are both added to the `cgroup.allow` file and
created inside the container once it is run. This poses a problem when
a new device needs to be added to a running container.
One solution is to add a more permissive rule to a container,
allowing it access to a wider range of devices. For example, supposing
our container needs access to a character device with major `42` and
any number of minor numbers (added as new devices appear), the
following rule would be added:
```bash
$ docker create --device-cgroup-rule='c 42:* rmw' --name my-container my-image
```
Then, a user could ask `udev` to execute a script that runs
`docker exec my-container mknod newDevX c 42 <minor>` to create the required device when it is added.
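
A sketch of the step such a script would perform, reusing the hypothetical
`my-container` from the example above and assuming `udev` reported minor
number `123`:

```bash
# create the device node inside the already running container
$ docker exec my-container mknod /dev/newDev123 c 42 123
```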
> **Note**: initially present devices still need to be explicitly added to
> the `create`/`run` command.

---
title: "deploy"
description: "The deploy command description and usage"
keywords: "stack, deploy"
advisory: "experimental"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# deploy (experimental)
An alias for `stack deploy`.
```markdown
Usage: docker deploy [OPTIONS] STACK
Deploy a new stack or update an existing stack
Aliases:
deploy, up
Options:
--bundle-file string Path to a Distributed Application Bundle file
--compose-file string Path to a Compose file
--help Print usage
--prune Prune services that are no longer referenced
--with-registry-auth Send registry authentication details to Swarm agents
```
## Description
Create and update a stack from a `compose` or a `dab` file on the swarm. This command
has to be run targeting a manager node.
## Examples
### Compose file
The `deploy` command supports compose file version `3.0` and above.
```bash
$ docker stack deploy --compose-file docker-compose.yml vossibility
Ignoring unsupported options: links
Creating network vossibility_vossibility
Creating network vossibility_default
Creating service vossibility_nsqd
Creating service vossibility_logstash
Creating service vossibility_elasticsearch
Creating service vossibility_kibana
Creating service vossibility_ghollector
Creating service vossibility_lookupd
```
You can verify that the services were correctly created:
```bash
$ docker service ls
ID NAME MODE REPLICAS IMAGE
29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662
4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662
4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa
7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03
9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe
axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba
```
### DAB file
```bash
$ docker stack deploy --bundle-file vossibility-stack.dab vossibility
Loading bundle from vossibility-stack.dab
Creating service vossibility_elasticsearch
Creating service vossibility_kibana
Creating service vossibility_logstash
Creating service vossibility_lookupd
Creating service vossibility_nsqd
Creating service vossibility_vossibility-collector
```
You can verify that the services were correctly created:
```bash
$ docker service ls
ID NAME MODE REPLICAS IMAGE
29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662
4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662
4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa
7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03
9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe
axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba
```
## Related commands
* [stack config](stack_config.md)
* [stack deploy](stack_deploy.md)
* [stack ls](stack_ls.md)
* [stack ps](stack_ps.md)
* [stack rm](stack_rm.md)
* [stack services](stack_services.md)

---
title: "diff"
description: "The diff command description and usage"
keywords: "list, changed, files, container"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# diff
```markdown
Usage: docker diff CONTAINER
Inspect changes to files or directories on a container's filesystem
Options:
--help Print usage
```
## Description
List the changed files and directories in a container's filesystem since the
container was created. Three different types of change are tracked:
| Symbol | Description |
|--------|---------------------------------|
| `A` | A file or directory was added |
| `D` | A file or directory was deleted |
| `C` | A file or directory was changed |
You can use the full or shortened container ID or the container name set using
the `docker run --name` option.
## Examples
Inspect the changes to an `nginx` container:
```bash
$ docker diff 1fdfd1f54c1b
C /dev
C /dev/console
C /dev/core
C /dev/stdout
C /dev/fd
C /dev/ptmx
C /dev/stderr
C /dev/stdin
C /run
A /run/nginx.pid
C /var/lib/nginx/tmp
A /var/lib/nginx/tmp/client_body
A /var/lib/nginx/tmp/fastcgi
A /var/lib/nginx/tmp/proxy
A /var/lib/nginx/tmp/scgi
A /var/lib/nginx/tmp/uwsgi
C /var/log/nginx
A /var/log/nginx/access.log
A /var/log/nginx/error.log
```

---
title: "events"
description: "The events command description and usage"
keywords: "events, container, report"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/moby/moby/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# events
```markdown
Usage: docker events [OPTIONS]
Get real time events from the server
Options:
-f, --filter value Filter output based on conditions provided (default [])
--format string Format the output using the given Go template
--help Print usage
--since string Show all events created since timestamp
--until string Stream events until this timestamp
```
## Description
Use `docker events` to get real-time events from the server. These events differ
per Docker object type.
### Object types
#### Containers
Docker containers report the following events:
- `attach`
- `commit`
- `copy`
- `create`
- `destroy`
- `detach`
- `die`
- `exec_create`
- `exec_detach`
- `exec_start`
- `export`
- `health_status`
- `kill`
- `oom`
- `pause`
- `rename`
- `resize`
- `restart`
- `start`
- `stop`
- `top`
- `unpause`
- `update`
#### Images
Docker images report the following events:
- `delete`
- `import`
- `load`
- `pull`
- `push`
- `save`
- `tag`
- `untag`
#### Plugins
Docker plugins report the following events:
- `install`
- `enable`
- `disable`
- `remove`
#### Volumes
Docker volumes report the following events:
- `create`
- `mount`
- `unmount`
- `destroy`
#### Networks
Docker networks report the following events:
- `create`
- `connect`
- `disconnect`
- `destroy`
#### Daemons
Docker daemons report the following events:
- `reload`
### Limiting, filtering, and formatting the output
#### Limit events by time
The `--since` and `--until` parameters can be Unix timestamps, date formatted
timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
relative to the client machine's time. If you do not provide the `--since` option,
the command returns only new and/or live events. Supported formats for date
formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
timezone on the client will be used if you do not provide either a `Z` or a
`+-00:00` timezone offset at the end of the timestamp. When providing Unix
timestamps enter seconds[.nanoseconds], where seconds is the number of seconds
that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
fraction of a second no more than nine digits long.
#### Filtering
The filtering flag (`-f` or `--filter`) format is of "key=value". If you would
like to use multiple filters, pass multiple flags (e.g.,
`--filter "foo=bar" --filter "bif=baz"`).

Using the same filter multiple times will be handled as an *OR*; for example,
`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display
events for container 588a23dac085 *OR* container a8f7720b8c22.

Using multiple filters will be handled as an *AND*; for example,
`--filter container=588a23dac085 --filter event=start` will display events for
container 588a23dac085 *AND* the event type is *start*.
The currently supported filters are:
* container (`container=<name or id>`)
* daemon (`daemon=<name or id>`)
* event (`event=<event action>`)
* image (`image=<tag or id>`)
* label (`label=<key>` or `label=<key>=<value>`)
* network (`network=<name or id>`)
* plugin (`plugin=<name or id>`)
* type (`type=<container or image or volume or network or daemon or plugin>`)
* volume (`volume=<name or id>`)
#### Format
If a format (`--format`) is specified, the given template will be executed
instead of the default
format. Go's [text/template](http://golang.org/pkg/text/template/) package
describes all the details of the format.
If a format is set to `{{json .}}`, the events are streamed as valid JSON
Lines. For information about JSON Lines, please refer to http://jsonlines.org/.
## Examples
### Basic example
You'll need two shells for this example.
**Shell 1: Listening for events:**
```bash
$ docker events
```
**Shell 2: Start and Stop containers:**
```bash
$ docker create --name test alpine:latest top
$ docker start test
$ docker stop test
```
**Shell 1: (Again .. now showing events):**
```none
2017-01-05T00:35:58.859401177+08:00 container create 0fdb48addc82871eb34eb23a847cfd033dedd1a0a37bef2e6d9eb3870fc7ff37 (image=alpine:latest, name=test)
2017-01-05T00:36:04.703631903+08:00 network connect e2e1f5ceda09d4300f3a846f0acfaa9a8bb0d89e775eb744c5acecd60e0529e2 (container=0fdb...ff37, name=bridge, type=bridge)
2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
```
To exit the `docker events` command, use `CTRL+C`.
### Filter events by time
You can filter the output by an absolute timestamp or relative time on the host
machine, using the following different time syntaxes:
```bash
$ docker events --since 1483283804
2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
$ docker events --since '2017-01-05'
2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
$ docker events --since '2013-09-03T15:49:29'
2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
$ docker events --since '10m'
2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
```
### Filter events by criteria
The following commands show several different ways to filter the `docker event`
output.
```bash
$ docker events --filter 'event=stop'
2017-01-05T00:40:22.880175420+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
2017-01-05T00:41:17.888104182+08:00 container stop 2a8f...4e78 (image=alpine, name=kickass_brattain)
$ docker events --filter 'image=alpine'
2017-01-05T00:41:55.784240236+08:00 container create d9cd...4d70 (image=alpine, name=happy_meitner)
2017-01-05T00:41:55.913156783+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
2017-01-05T00:42:01.106875249+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=15)
2017-01-05T00:42:11.111934041+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=9)
2017-01-05T00:42:11.119578204+08:00 container die d9cd...4d70 (exitCode=137, image=alpine, name=happy_meitner)
2017-01-05T00:42:11.173276611+08:00 container stop d9cd...4d70 (image=alpine, name=happy_meitner)
$ docker events --filter 'container=test'
2017-01-05T00:43:00.139719934+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
2017-01-05T00:43:09.259951086+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
2017-01-05T00:43:09.270102715+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
2017-01-05T00:43:09.312556440+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
$ docker events --filter 'container=test' --filter 'container=d9cdb1525ea8'
2017-01-05T00:44:11.517071981+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
2017-01-05T00:44:17.685870901+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
2017-01-05T00:44:29.757658470+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=9)
2017-01-05T00:44:29.767718510+08:00 container die 0fdb...ff37 (exitCode=137, image=alpine:latest, name=test)
2017-01-05T00:44:29.815798344+08:00 container destroy 0fdb...ff37 (image=alpine:latest, name=test)
$ docker events --filter 'container=test' --filter 'event=stop'
2017-01-05T00:46:13.664099505+08:00 container stop a9d1...e130 (image=alpine, name=test)
$ docker events --filter 'type=volume'
2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local)
2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562f...5025, destination=/foo, driver=local, propagation=rprivate)
2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562f...5025, driver=local)
2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local)
$ docker events --filter 'type=network'
2015-12-23T21:38:24.705709133Z network create 8b11...2c5b (name=test-event-network-local, type=bridge)
2015-12-23T21:38:25.119625123Z network connect 8b11...2c5b (name=test-event-network-local, container=b4be...c54e, type=bridge)
$ docker events --filter 'container=container_1' --filter 'container=container_2'
2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
$ docker events --filter 'type=volume'
2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local)
2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate)
2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local)
2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local)
$ docker events --filter 'type=network'
2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge)
2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge)
$ docker events --filter 'type=plugin'
2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest)
2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest)
```
### Format the output
```bash
$ docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}'
Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
```
#### Format as JSON
```none
$ docker events --format '{{json .}}'
{"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
{"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
{"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e..
{"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42..
{"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
```

---
title: "exec"
description: "The exec command description and usage"
keywords: "command, container, run, execute"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# exec
```markdown
Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...]
Run a command in a running container
Options:
-d, --detach Detached mode: run command in the background
--detach-keys Override the key sequence for detaching a container
-e, --env=[] Set environment variables
--help Print usage
-i, --interactive Keep STDIN open even if not attached
--privileged Give extended privileges to the command
-t, --tty Allocate a pseudo-TTY
-u, --user Username or UID (format: <name|uid>[:<group|gid>])
```
## Description
The `docker exec` command runs a new command in a running container.
The command started using `docker exec` only runs while the container's primary
process (`PID 1`) is running, and it is not restarted if the container is
restarted.
COMMAND will run in the default directory of the container. If the
underlying image has a custom directory specified with the WORKDIR directive
in its Dockerfile, this directory is used instead.

COMMAND must be an executable; a chained or a quoted command
will not work. For example, `docker exec -ti my_container "echo a && echo b"` will
not work, but `docker exec -ti my_container sh -c "echo a && echo b"` will.
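
For example, an environment variable can be set just for the executed command;
the variable and container name are illustrative:

```bash
$ docker exec -e MYVAR=1 my_container env
```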
## Examples
### Run `docker exec` on a running container
First, start a container.
```bash
$ docker run --name ubuntu_bash --rm -i -t ubuntu bash
```
This will create a container named `ubuntu_bash` and start a Bash session.
Next, execute a command on the container.
```bash
$ docker exec -d ubuntu_bash touch /tmp/execWorks
```
This will create a new file `/tmp/execWorks` inside the running container
`ubuntu_bash`, in the background.
Next, execute an interactive `bash` shell on the container.
```bash
$ docker exec -it ubuntu_bash bash
```
This will create a new Bash session in the container `ubuntu_bash`.
### Try to run `docker exec` on a paused container
If the container is paused, then the `docker exec` command will fail with an error:
```bash
$ docker pause test
test
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1ae3b36715d2 ubuntu:latest "bash" 17 seconds ago Up 16 seconds (Paused) test
$ docker exec test ls
FATA[0000] Error response from daemon: Container test is paused, unpause the container before exec
$ echo $?
1
```

---
title: "export"
description: "The export command description and usage"
keywords: "export, file, system, container"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# export
```markdown
Usage: docker export [OPTIONS] CONTAINER
Export a container's filesystem as a tar archive
Options:
--help Print usage
-o, --output string Write to a file, instead of STDOUT
```
## Description
The `docker export` command does not export the contents of volumes associated
with the container. If a volume is mounted on top of an existing directory in
the container, `docker export` will export the contents of the *underlying*
directory, not the contents of the volume.
Refer to [Backup, restore, or migrate data volumes](https://docs.docker.com/engine/tutorials/dockervolumes/#backup-restore-or-migrate-data-volumes)
in the user guide for examples on exporting data in a volume.
## Examples
Each of these commands has the same result.
```bash
$ docker export red_panda > latest.tar
```
```bash
$ docker export --output="latest.tar" red_panda
```
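To double-check what was exported, the resulting archive can be inspected with standard `tar` tooling, for example (assuming the `latest.tar` created above):
```bash
$ tar -tf latest.tar | head
```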

View File

@ -0,0 +1,93 @@
---
title: "history"
description: "The history command description and usage"
keywords: "docker, image, history"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# history
```markdown
Usage: docker history [OPTIONS] IMAGE
Show the history of an image
Options:
--format string Pretty-print images using a Go template
--help Print usage
-H, --human Print sizes and dates in human readable format (default true)
--no-trunc Don't truncate output
-q, --quiet Only show numeric IDs
```
## Examples
To see how the `docker:latest` image was built:
```bash
$ docker history docker
IMAGE CREATED CREATED BY SIZE COMMENT
3e23a5875458 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B
8578938dd170 8 days ago /bin/sh -c dpkg-reconfigure locales && loc 1.245 MB
be51b77efb42 8 days ago /bin/sh -c apt-get update && apt-get install 338.3 MB
4b137612be55 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB
750d58736b4b 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi <ad 0 B
511136ea3c5a 9 months ago 0 B Imported from -
```
To see how the `docker:apache` image was added to a container's base image:
```bash
$ docker history docker:scm
IMAGE CREATED CREATED BY SIZE COMMENT
2ac9d1098bf1 3 months ago /bin/bash 241.4 MB Added Apache to Fedora base image
88b42ffd1f7c 5 months ago /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7 373.7 MB
c69cab00d6ef 5 months ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B
511136ea3c5a 19 months ago 0 B Imported from -
```
### Format the output
The formatting option (`--format`) pretty-prints history output
using a Go template.
Valid placeholders for the Go template are listed below:
| Placeholder | Description |
| --------------- | ----------- |
| `.ID` | Image ID |
| `.CreatedSince` | Elapsed time since the image was created if `--human=true`, otherwise timestamp of when image was created |
| `.CreatedAt` | Timestamp of when image was created |
| `.CreatedBy` | Command that was used to create the image |
| `.Size` | Image disk size |
| `.Comment` | Comment for image |
When using the `--format` option, the `history` command will either
output the data exactly as the template declares or, when using the
`table` directive, will include column headers as well.
The following example uses a template without headers and outputs the
`ID` and `CreatedSince` entries separated by a colon for the `docker` image:
```bash
$ docker history --format "{{.ID}}: {{.CreatedSince}}" docker
cc1b61406712: 2 weeks ago
<missing>: 2 weeks ago
<missing>: 2 weeks ago
<missing>: 2 weeks ago
<missing>: 2 weeks ago
<missing>: 3 weeks ago
<missing>: 3 weeks ago
<missing>: 3 weeks ago
```
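To include column headers, the same placeholders can be combined with the `table` directive mentioned above, for example against the `docker` image used earlier:
```bash
$ docker history --format "table {{.ID}}\t{{.CreatedSince}}\t{{.Size}}" docker
```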

View File

@ -0,0 +1,47 @@
---
title: "image"
description: "The image command description and usage"
keywords: "image"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# image
```markdown
Usage: docker image COMMAND
Manage images
Options:
--help Print usage
Commands:
build Build an image from a Dockerfile
history Show the history of an image
import Import the contents from a tarball to create a filesystem image
inspect Display detailed information on one or more images
load Load an image from a tar archive or STDIN
ls List images
prune Remove unused images
pull Pull an image or a repository from a registry
push Push an image or a repository to a registry
rm Remove one or more images
save Save one or more images to a tar archive (streamed to STDOUT by default)
tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
Run 'docker image COMMAND --help' for more information on a command.
```
## Description
Manage images.
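The subcommands mirror the older top-level commands. For example, listing images through the management command is equivalent to running `docker images`:
```bash
$ docker image ls
```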

View File

@ -0,0 +1,171 @@
---
title: "image prune"
description: "Remove all stopped images"
keywords: "image, prune, delete, remove"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# image prune
```markdown
Usage: docker image prune [OPTIONS]
Remove unused images
Options:
-a, --all Remove all unused images, not just dangling ones
--filter filter Provide filter values (e.g. 'until=<timestamp>')
-f, --force Do not prompt for confirmation
--help Print usage
```
## Description
Remove all dangling images. If `-a` is specified, all images not referenced by any container are also removed.
## Examples
Example output:
```bash
$ docker image prune -a
WARNING! This will remove all images without at least one container associated to them.
Are you sure you want to continue? [y/N] y
Deleted Images:
untagged: alpine:latest
untagged: alpine@sha256:3dcdb92d7432d56604d4545cbd324b14e647b313626d99b889d0626de158f73a
deleted: sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba
deleted: sha256:4fe15f8d0ae69e169824f25f1d4da3015a48feeeeebb265cd2e328e15c6a869f
untagged: alpine:3.3
untagged: alpine@sha256:4fa633f4feff6a8f02acfc7424efd5cb3e76686ed3218abf4ca0fa4a2a358423
untagged: my-jq:latest
deleted: sha256:ae67841be6d008a374eff7c2a974cde3934ffe9536a7dc7ce589585eddd83aff
deleted: sha256:34f6f1261650bc341eb122313372adc4512b4fceddc2a7ecbb84f0958ce5ad65
deleted: sha256:cf4194e8d8db1cb2d117df33f2c75c0369c3a26d96725efb978cc69e046b87e7
untagged: my-curl:latest
deleted: sha256:b2789dd875bf427de7f9f6ae001940073b3201409b14aba7e5db71f408b8569e
deleted: sha256:96daac0cb203226438989926fc34dd024f365a9a8616b93e168d303cfe4cb5e9
deleted: sha256:5cbd97a14241c9cd83250d6b6fc0649833c4a3e84099b968dd4ba403e609945e
deleted: sha256:a0971c4015c1e898c60bf95781c6730a05b5d8a2ae6827f53837e6c9d38efdec
deleted: sha256:d8359ca3b681cc5396a4e790088441673ed3ce90ebc04de388bfcd31a0716b06
deleted: sha256:83fc9ba8fb70e1da31dfcc3c88d093831dbd4be38b34af998df37e8ac538260c
deleted: sha256:ae7041a4cc625a9c8e6955452f7afe602b401f662671cea3613f08f3d9343b35
deleted: sha256:35e0f43a37755b832f0bbea91a2360b025ee351d7309dae0d9737bc96b6d0809
deleted: sha256:0af941dd29f00e4510195dd00b19671bc591e29d1495630e7e0f7c44c1e6a8c0
deleted: sha256:9fc896fc2013da84f84e45b3096053eb084417b42e6b35ea0cce5a3529705eac
deleted: sha256:47cf20d8c26c46fff71be614d9f54997edacfe8d46d51769706e5aba94b16f2b
deleted: sha256:2c675ee9ed53425e31a13e3390bf3f539bf8637000e4bcfbb85ee03ef4d910a1
Total reclaimed space: 16.43 MB
```
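Without `-a`, only dangling images are removed, and a confirmation prompt is shown unless `-f`/`--force` is passed (prompt shown here for illustration):
```bash
$ docker image prune
WARNING! This will remove all dangling images.
Are you sure you want to continue? [y/N] y
```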
### Filtering
The filtering flag (`-f` or `--filter`) format is "key=value". If there is more
than one filter, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
The currently supported filters are:
* until (`<timestamp>`) - only remove images created before given timestamp
* label (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) - only remove images with (or without, in case `label!=...` is used) the specified labels.
The `until` filter can be Unix timestamps, date formatted
timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
relative to the daemon machine's time. Supported formats for date
formatted timestamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
timezone on the daemon will be used if you do not provide either a `Z` or a
`+-00:00` timezone offset at the end of the timestamp. When providing Unix
timestamps enter seconds[.nanoseconds], where seconds is the number of seconds
that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
fraction of a second no more than nine digits long.
The `label` filter accepts two formats. One is the `label=...` (`label=<key>` or `label=<key>=<value>`),
which removes images with the specified labels. The other
format is the `label!=...` (`label!=<key>` or `label!=<key>=<value>`), which removes
images without the specified labels.
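For example, the following two invocations remove unused images that carry a `deprecated` label, and unused images that lack a `maintainer=example` label, respectively (both label names are hypothetical):
```bash
$ docker image prune --filter "label=deprecated"
$ docker image prune --filter "label!=maintainer=example"
```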
The following removes images created before `2017-01-04T00:00:00`:
```bash
$ docker images --format 'table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}'
REPOSITORY TAG IMAGE ID CREATED AT SIZE
foo latest 2f287ac753da 2017-01-04 13:42:23 -0800 PST 3.98 MB
alpine latest 88e169ea8f46 2016-12-27 10:17:25 -0800 PST 3.98 MB
busybox latest e02e811dd08f 2016-10-07 14:03:58 -0700 PDT 1.09 MB
$ docker image prune -a --force --filter "until=2017-01-04T00:00:00"
Deleted Images:
untagged: alpine:latest
untagged: alpine@sha256:dfbd4a3a8ebca874ebd2474f044a0b33600d4523d03b0df76e5c5986cb02d7e8
untagged: busybox:latest
untagged: busybox@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912
deleted: sha256:e02e811dd08fd49e7f6032625495118e63f597eb150403d02e3238af1df240ba
deleted: sha256:e88b3f82283bc59d5e0df427c824e9f95557e661fcb0ea15fb0fb6f97760f9d9
Total reclaimed space: 1.093 MB
$ docker images --format 'table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}'
REPOSITORY TAG IMAGE ID CREATED AT SIZE
foo latest 2f287ac753da 2017-01-04 13:42:23 -0800 PST 3.98 MB
```
The following removes images created more than 10 days (`240h`) ago:
```bash
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
foo latest 2f287ac753da 14 seconds ago 3.98 MB
alpine latest 88e169ea8f46 8 days ago 3.98 MB
debian jessie 7b0a06c805e8 2 months ago 123 MB
busybox latest e02e811dd08f 2 months ago 1.09 MB
golang 1.7.0 138c2e655421 4 months ago 670 MB
$ docker image prune -a --force --filter "until=240h"
Deleted Images:
untagged: golang:1.7.0
untagged: golang@sha256:6765038c2b8f407fd6e3ecea043b44580c229ccfa2a13f6d85866cf2b4a9628e
deleted: sha256:138c2e6554219de65614d88c15521bfb2da674cbb0bf840de161f89ff4264b96
deleted: sha256:ec353c2e1a673f456c4b78906d0d77f9d9456cfb5229b78c6a960bfb7496b76a
deleted: sha256:fe22765feaf3907526b4921c73ea6643ff9e334497c9b7e177972cf22f68ee93
deleted: sha256:ff845959c80148421a5c3ae11cc0e6c115f950c89bc949646be55ed18d6a2912
deleted: sha256:a4320831346648c03db64149eafc83092e2b34ab50ca6e8c13112388f25899a7
deleted: sha256:4c76020202ee1d9709e703b7c6de367b325139e74eebd6b55b30a63c196abaf3
deleted: sha256:d7afd92fb07236c8a2045715a86b7d5f0066cef025018cd3ca9a45498c51d1d6
deleted: sha256:9e63c5bce4585dd7038d830a1f1f4e44cb1a1515b00e620ac718e934b484c938
untagged: debian:jessie
untagged: debian@sha256:c1af755d300d0c65bb1194d24bce561d70c98a54fb5ce5b1693beb4f7988272f
deleted: sha256:7b0a06c805e8f23807fb8856621c60851727e85c7bcb751012c813f122734c8d
deleted: sha256:f96222d75c5563900bc4dd852179b720a0885de8f7a0619ba0ac76e92542bbc8
Total reclaimed space: 792.6 MB
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
foo latest 2f287ac753da About a minute ago 3.98 MB
alpine latest 88e169ea8f46 8 days ago 3.98 MB
busybox latest e02e811dd08f 2 months ago 1.09 MB
```
## Related commands
* [system df](system_df.md)
* [container prune](container_prune.md)
* [volume prune](volume_prune.md)
* [network prune](network_prune.md)
* [system prune](system_prune.md)

View File

@ -0,0 +1,343 @@
---
title: "images"
description: "The images command description and usage"
keywords: "list, docker, images"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# images
```markdown
Usage: docker images [OPTIONS] [REPOSITORY[:TAG]]
List images
Options:
-a, --all Show all images (default hides intermediate images)
--digests Show digests
-f, --filter value Filter output based on conditions provided (default [])
- dangling=(true|false)
- label=<key> or label=<key>=<value>
- before=(<image-name>[:tag]|<image-id>|<image@digest>)
- since=(<image-name>[:tag]|<image-id>|<image@digest>)
- reference=(pattern of an image reference)
--format string Pretty-print images using a Go template
--help Print usage
--no-trunc Don't truncate output
-q, --quiet Only show numeric IDs
```
## Description
The default `docker images` will show all top level
images, their repository and tags, and their size.
Docker images have intermediate layers that increase reusability,
decrease disk usage, and speed up `docker build` by
allowing each step to be cached. These intermediate layers are not shown
by default.
The `SIZE` is the cumulative space taken up by the image and all
its parent images. This is also the disk space used by the contents of the
Tar file created when you `docker save` an image.
An image will be listed more than once if it has multiple repository names
or tags. This single image (identifiable by its matching `IMAGE ID`)
uses up the `SIZE` listed only once.
## Examples
### List the most recently created images
```bash
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
<none> <none> 77af4d6b9913 19 hours ago 1.089 GB
committ latest b6fa739cedf5 19 hours ago 1.089 GB
<none> <none> 78a85c484f71 19 hours ago 1.089 GB
docker latest 30557a29d5ab 20 hours ago 1.089 GB
<none> <none> 5ed6274db6ce 24 hours ago 1.089 GB
postgres 9 746b819f315e 4 days ago 213.4 MB
postgres 9.3 746b819f315e 4 days ago 213.4 MB
postgres 9.3.5 746b819f315e 4 days ago 213.4 MB
postgres latest 746b819f315e 4 days ago 213.4 MB
```
### List images by name and tag
The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument
that restricts the list to images that match the argument. If you specify
`REPOSITORY` but no `TAG`, the `docker images` command lists all images in the
given repository.
For example, to list all images in the "java" repository, run this command:
```bash
$ docker images java
REPOSITORY TAG IMAGE ID CREATED SIZE
java 8 308e519aac60 6 days ago 824.5 MB
java 7 493d82594c15 3 months ago 656.3 MB
java latest 2711b1d6f3aa 5 months ago 603.9 MB
```
The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example,
`docker images jav` does not match the image `java`.
If both `REPOSITORY` and `TAG` are provided, only images matching that
repository and tag are listed. To find all local images in the "java"
repository with tag "8" you can use:
```bash
$ docker images java:8
REPOSITORY TAG IMAGE ID CREATED SIZE
java 8 308e519aac60 6 days ago 824.5 MB
```
If nothing matches `REPOSITORY[:TAG]`, the list is empty.
```bash
$ docker images java:0
REPOSITORY TAG IMAGE ID CREATED SIZE
```
### List the full length image IDs
```bash
$ docker images --no-trunc
REPOSITORY TAG IMAGE ID CREATED SIZE
<none> <none> sha256:77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB
committest latest sha256:b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB
<none> <none> sha256:78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB
docker latest sha256:30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB
<none> <none> sha256:0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB
<none> <none> sha256:18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB
<none> <none> sha256:f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB
tryout latest sha256:2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB
<none> <none> sha256:5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB
```
### List image digests
Images that use the v2 or later format have a content-addressable identifier
called a `digest`. As long as the input used to generate the image is
unchanged, the digest value is predictable. To list image digest values, use
the `--digests` flag:
```bash
$ docker images --digests
REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE
localhost:5000/test/busybox <none> sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB
```
When pushing or pulling to a 2.0 registry, the `push` or `pull` command
output includes the image digest. You can `pull` using a digest value. You can
also reference by digest in `create`, `run`, and `rmi` commands, as well as the
`FROM` image reference in a Dockerfile.
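For example, the `busybox` image listed above can be pulled by its digest instead of by tag, pinning the exact image content:
```bash
$ docker pull localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
```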
### Filtering
The filtering flag (`-f` or `--filter`) format is "key=value". If there is more
than one filter, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
The currently supported filters are:
* dangling (boolean - true or false)
* label (`label=<key>` or `label=<key>=<value>`)
* before (`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - filter images created before given id or references
* since (`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - filter images created since given id or references
* reference (pattern of an image reference) - filter images whose reference matches the specified pattern
#### Show untagged images (dangling)
```bash
$ docker images --filter "dangling=true"
REPOSITORY TAG IMAGE ID CREATED SIZE
<none> <none> 8abc22fbb042 4 weeks ago 0 B
<none> <none> 48e5f45168b9 4 weeks ago 2.489 MB
<none> <none> bf747efa0e2f 4 weeks ago 0 B
<none> <none> 980fe10e5736 12 weeks ago 101.4 MB
<none> <none> dea752e4e117 12 weeks ago 101.4 MB
<none> <none> 511136ea3c5a 8 months ago 0 B
```
This will display untagged images that are the leaves of the images tree (not
intermediary layers). These images occur when a new build of an image takes the
`repo:tag` away from the image ID, leaving it as `<none>:<none>` or untagged.
A warning is issued if you try to remove an image while a container is currently
using it. Having this flag makes batch cleanup possible.
You can use this in conjunction with `docker rmi ...`:
```bash
$ docker rmi $(docker images -f "dangling=true" -q)
8abc22fbb042
48e5f45168b9
bf747efa0e2f
980fe10e5736
dea752e4e117
511136ea3c5a
```
> **Note**: Docker warns you if any containers exist that are using these
> untagged images.
#### Show images with a given label
The `label` filter matches images based on the presence of a `label` alone or a `label` and a
value.
The following filter matches images with the `com.example.version` label regardless of its value.
```bash
$ docker images --filter "label=com.example.version"
REPOSITORY TAG IMAGE ID CREATED SIZE
match-me-1 latest eeae25ada2aa About a minute ago 188.3 MB
match-me-2 latest dea752e4e117 About a minute ago 188.3 MB
```
The following filter matches images with the `com.example.version` label with the `1.0` value.
```bash
$ docker images --filter "label=com.example.version=1.0"
REPOSITORY TAG IMAGE ID CREATED SIZE
match-me latest 511136ea3c5a About a minute ago 188.3 MB
```
In this example, with the `0.1` value, it returns an empty set because no matches were found.
```bash
$ docker images --filter "label=com.example.version=0.1"
REPOSITORY TAG IMAGE ID CREATED SIZE
```
#### Filter images by time
The `before` filter shows only images created before the image with
given id or reference. For example, having these images:
```bash
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
image1 latest eeae25ada2aa 4 minutes ago 188.3 MB
image2 latest dea752e4e117 9 minutes ago 188.3 MB
image3 latest 511136ea3c5a 25 minutes ago 188.3 MB
```
Filtering with `before` would give:
```bash
$ docker images --filter "before=image1"
REPOSITORY TAG IMAGE ID CREATED SIZE
image2 latest dea752e4e117 9 minutes ago 188.3 MB
image3 latest 511136ea3c5a 25 minutes ago 188.3 MB
```
Filtering with `since` would give:
```bash
$ docker images --filter "since=image3"
REPOSITORY TAG IMAGE ID CREATED SIZE
image1 latest eeae25ada2aa 4 minutes ago 188.3 MB
image2 latest dea752e4e117 9 minutes ago 188.3 MB
```
#### Filter images by reference
The `reference` filter shows only images whose reference matches
the specified pattern.
```bash
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
busybox latest e02e811dd08f 5 weeks ago 1.09 MB
busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB
busybox musl 733eb3059dce 5 weeks ago 1.21 MB
busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB
```
Filtering with `reference` would give:
```bash
$ docker images --filter=reference='busy*:*libc'
REPOSITORY TAG IMAGE ID CREATED SIZE
busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB
busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB
```
### Format the output
The formatting option (`--format`) pretty-prints image output
using a Go template.
Valid placeholders for the Go template are listed below:
| Placeholder | Description|
| ---- | ---- |
| `.ID` | Image ID |
| `.Repository` | Image repository |
| `.Tag` | Image tag |
| `.Digest` | Image digest |
| `.CreatedSince` | Elapsed time since the image was created |
| `.CreatedAt` | Time when the image was created |
| `.Size` | Image disk size |
When using the `--format` option, the `images` command will either
output the data exactly as the template declares or, when using the
`table` directive, will include column headers as well.
The following example uses a template without headers and outputs the
`ID` and `Repository` entries separated by a colon for all images:
```bash
$ docker images --format "{{.ID}}: {{.Repository}}"
77af4d6b9913: <none>
b6fa739cedf5: committ
78a85c484f71: <none>
30557a29d5ab: docker
5ed6274db6ce: <none>
746b819f315e: postgres
746b819f315e: postgres
746b819f315e: postgres
746b819f315e: postgres
```
To list all images with their repository and tag in a table format you
can use:
```bash
$ docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}"
IMAGE ID REPOSITORY TAG
77af4d6b9913 <none> <none>
b6fa739cedf5 committ latest
78a85c484f71 <none> <none>
30557a29d5ab docker latest
5ed6274db6ce <none> <none>
746b819f315e postgres 9
746b819f315e postgres 9.3
746b819f315e postgres 9.3.5
746b819f315e postgres latest
```

View File

@ -0,0 +1,89 @@
---
title: "import"
description: "The import command description and usage"
keywords: "import, file, system, container"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# import
```markdown
Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]
Import the contents from a tarball to create a filesystem image
Options:
-c, --change value Apply Dockerfile instruction to the created image (default [])
--help Print usage
-m, --message string Set commit message for imported image
```
## Description
You can specify a `URL` or `-` (dash) to take data directly from `STDIN`. The
`URL` can point to an archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz)
containing a filesystem or to an individual file on the Docker host. If you
specify an archive, Docker untars it in the container relative to the `/`
(root). If you specify an individual file, you must specify the full path within
the host. To import from a remote location, specify a `URI` that begins with the
`http://` or `https://` protocol.
The `--change` option will apply `Dockerfile` instructions to the image
that is created.
Supported `Dockerfile` instructions:
`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
## Examples
### Import from a remote location
This will create a new untagged image.
```bash
$ docker import http://example.com/exampleimage.tgz
```
### Import from a local file
- Import to docker via pipe and `STDIN`.
```bash
$ cat exampleimage.tgz | docker import - exampleimagelocal:new
```
- Import with a commit message.
```bash
$ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new
```
- Import to docker from a local archive.
```bash
$ docker import /path/to/exampleimage.tgz
```
### Import from a local directory
```bash
$ sudo tar -c . | docker import - exampleimagedir
```
### Import from a local directory with new configurations
```bash
$ sudo tar -c . | docker import --change "ENV DEBUG true" - exampleimagedir
```
Note the `sudo` in this example: you must preserve
the ownership of the files (especially root ownership) during the
archiving with tar. If you are not root (or not using the sudo command) when you
tar, then the ownership might not be preserved.
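Several `--change` flags can be combined to apply more than one `Dockerfile` instruction to the imported image; a sketch along the lines of the previous examples (the instructions and tag are arbitrary):
```bash
$ sudo tar -c . | docker import --change "ENV DEBUG true" --change "WORKDIR /app" - exampleimagedir:v2
```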

View File

@ -0,0 +1,184 @@
---
title: "Docker commands"
description: "Docker's CLI command description and usage"
keywords: "Docker, Docker documentation, CLI, command line"
identifier: "smn_cli_guide"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# The Docker commands
This section contains reference information on using Docker's command line
client. Each command has a reference page along with samples. If you are
unfamiliar with the command line, you should start by reading about how to [Use
the Docker command line](cli.md).
You start the Docker daemon with the command line. How you start the daemon
affects your Docker containers. For that reason you should also make sure to
read the [`dockerd`](dockerd.md) reference page.
## Commands by object
### Docker management commands
| Command | Description |
|:--------|:-------------------------------------------------------------------|
| [dockerd](dockerd.md) | Launch the Docker daemon |
| [info](info.md) | Display system-wide information |
| [inspect](inspect.md)| Return low-level information on a container or image |
| [version](version.md) | Show the Docker version information |
### Image commands
| Command | Description |
|:--------|:-------------------------------------------------------------------|
| [build](build.md) | Build an image from a Dockerfile |
| [commit](commit.md) | Create a new image from a container's changes |
| [history](history.md) | Show the history of an image |
| [images](images.md) | List images |
| [import](import.md) | Import the contents from a tarball to create a filesystem image |
| [load](load.md) | Load an image from a tar archive or STDIN |
| [image prune](image_prune.md) | Remove unused images |
| [rmi](rmi.md) | Remove one or more images |
| [save](save.md) | Save images to a tar archive |
| [tag](tag.md) | Tag an image into a repository |
### Container commands
| Command | Description |
|:--------|:-------------------------------------------------------------------|
| [attach](attach.md) | Attach to a running container |
| [container prune](container_prune.md) | Remove all stopped containers |
| [cp](cp.md) | Copy files/folders from a container to a HOSTDIR or to STDOUT |
| [create](create.md) | Create a new container |
| [diff](diff.md) | Inspect changes on a container's filesystem |
| [events](events.md) | Get real time events from the server |
| [exec](exec.md) | Run a command in a running container |
| [export](export.md) | Export a container's filesystem as a tar archive |
| [kill](kill.md) | Kill a running container |
| [logs](logs.md) | Fetch the logs of a container |
| [pause](pause.md) | Pause all processes within a container |
| [port](port.md) | List port mappings or a specific mapping for the container |
| [ps](ps.md) | List containers |
| [rename](rename.md) | Rename a container |
| [restart](restart.md) | Restart a running container |
| [rm](rm.md) | Remove one or more containers |
| [run](run.md) | Run a command in a new container |
| [start](start.md) | Start one or more stopped containers |
| [stats](stats.md) | Display a live stream of container(s) resource usage statistics |
| [stop](stop.md) | Stop a running container |
| [top](top.md) | Display the running processes of a container |
| [unpause](unpause.md) | Unpause all processes within a container |
| [update](update.md) | Update configuration of one or more containers |
| [wait](wait.md) | Block until a container stops, then print its exit code |
### Hub and registry commands
| Command | Description |
|:--------|:-------------------------------------------------------------------|
| [login](login.md) | Register or log in to a Docker registry |
| [logout](logout.md) | Log out from a Docker registry |
| [pull](pull.md) | Pull an image or a repository from a Docker registry |
| [push](push.md) | Push an image or a repository to a Docker registry |
| [search](search.md) | Search the Docker Hub for images |
### Network and connectivity commands
| Command | Description |
|:--------|:-------------------------------------------------------------------|
| [network connect](network_connect.md) | Connect a container to a network |
| [network create](network_create.md) | Create a new network |
| [network disconnect](network_disconnect.md) | Disconnect a container from a network |
| [network inspect](network_inspect.md) | Display information about a network |
| [network ls](network_ls.md) | Lists all the networks the Engine `daemon` knows about |
| [network prune](network_prune.md) | Remove all unused networks |
| [network rm](network_rm.md) | Removes one or more networks |
### Shared data volume commands
| Command | Description |
|:--------|:-------------------------------------------------------------------|
| [volume create](volume_create.md) | Creates a new volume where containers can consume and store data |
| [volume inspect](volume_inspect.md) | Display information about a volume |
| [volume ls](volume_ls.md) | Lists all the volumes Docker knows about |
| [volume prune](volume_prune.md) | Remove all unused volumes |
| [volume rm](volume_rm.md) | Remove one or more volumes |
### Swarm node commands
| Command | Description |
|:--------|:-------------------------------------------------------------------|
| [node demote](node_demote.md) | Demotes an existing manager so that it is no longer a manager |
| [node inspect](node_inspect.md) | Inspect a node in the swarm |
| [node ls](node_ls.md) | List nodes in the swarm |
| [node promote](node_promote.md) | Promote a node that is pending a promotion to manager |
| [node ps](node_ps.md) | List tasks running on one or more nodes |
| [node rm](node_rm.md) | Remove one or more nodes from the swarm |
| [node update](node_update.md) | Update attributes for a node |
### Swarm management commands
| Command | Description |
|:--------|:-------------------------------------------------------------------|
| [swarm init](swarm_init.md) | Initialize a swarm |
| [swarm join](swarm_join.md) | Join a swarm as a manager node or worker node |
| [swarm leave](swarm_leave.md) | Remove the current node from the swarm |
| [swarm join-token](swarm_join_token.md) | Display or rotate join tokens |
| [swarm unlock](swarm_unlock.md) | Unlock swarm |
| [swarm unlock-key](swarm_unlock_key.md) | Manage the unlock key |
| [swarm update](swarm_update.md) | Update attributes of a swarm |
### Swarm service commands
| Command | Description |
|:--------|:-------------------------------------------------------------------|
| [service create](service_create.md) | Create a new service |
| [service inspect](service_inspect.md) | Inspect a service |
| [service logs](service_logs.md) | Fetch the logs of a service or task |
| [service ls](service_ls.md) | List services in the swarm |
| [service ps](service_ps.md) | List the tasks of a service |
| [service rm](service_rm.md) | Remove a service from the swarm |
| [service scale](service_scale.md) | Set the number of replicas for the desired state of the service |
| [service update](service_update.md) | Update the attributes of a service |
### Swarm secret commands
| Command | Description |
|:--------|:-------------------------------------------------------------------|
| [secret create](secret_create.md) | Create a secret from a file or STDIN as content |
| [secret inspect](secret_inspect.md) | Inspect the specified secret |
| [secret ls](secret_ls.md) | List secrets in the swarm |
| [secret rm](secret_rm.md) | Remove the specified secrets from the swarm |
### Swarm stack commands
| Command | Description |
|:--------|:-------------------------------------------------------------------|
| [stack deploy](stack_deploy.md) | Deploy a new stack or update an existing stack |
| [stack ls](stack_ls.md) | List stacks in the swarm |
| [stack ps](stack_ps.md) | List the tasks in the stack |
| [stack rm](stack_rm.md) | Remove the stack from the swarm |
| [stack services](stack_services.md) | List the services in the stack |
### Plugin commands
| Command | Description |
|:--------|:-------------------------------------------------------------------|
| [plugin create](plugin_create.md) | Create a plugin from a rootfs and configuration |
| [plugin disable](plugin_disable.md) | Disable a plugin |
| [plugin enable](plugin_enable.md) | Enable a plugin |
| [plugin inspect](plugin_inspect.md) | Display detailed information on a plugin |
| [plugin install](plugin_install.md) | Install a plugin |
| [plugin ls](plugin_ls.md) | List plugins |
| [plugin push](plugin_push.md) | Push a plugin to a registry |
| [plugin rm](plugin_rm.md) | Remove a plugin |
| [plugin set](plugin_set.md) | Change settings for a plugin |

View File

@ -0,0 +1,245 @@
---
title: "info"
description: "The info command description and usage"
keywords: "display, docker, information"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# info
```markdown
Usage: docker info [OPTIONS]
Display system-wide information
Options:
-f, --format string Format the output using the given Go template
--help Print usage
```
## Description
This command displays system-wide information regarding the Docker installation.
Information displayed includes the kernel version and the number of containers and images.
The number of images shown is the number of unique images. The same image tagged
under different names is counted only once.
If a format is specified, the given template will be executed instead of the
default format. Go's [text/template](http://golang.org/pkg/text/template/) package
describes all the details of the format.
Depending on the storage driver in use, additional information can be shown, such
as pool name, data file, metadata file, data space used, total data space, metadata
space used, and total metadata space.
The data file is where the images are stored and the metadata file is where the
metadata regarding those images is stored. When run for the first time Docker
allocates a certain amount of data space and meta data space from the space
available on the volume where `/var/lib/docker` is mounted.
## Examples
### Show output
The example below shows the output for a daemon running on Red Hat Enterprise Linux,
using the `devicemapper` storage driver. As can be seen in the output, additional
information about the `devicemapper` storage driver is shown:
```bash
$ docker info
Containers: 14
Running: 3
Paused: 1
Stopped: 10
Images: 52
Server Version: 1.10.3
Storage Driver: devicemapper
Pool Name: docker-202:2-25583803-pool
Pool Blocksize: 65.54 kB
Base Device Size: 10.74 GB
Backing Filesystem: xfs
Data file: /dev/loop0
Metadata file: /dev/loop1
Data Space Used: 1.68 GB
Data Space Total: 107.4 GB
Data Space Available: 7.548 GB
Metadata Space Used: 2.322 MB
Metadata Space Total: 2.147 GB
Metadata Space Available: 2.145 GB
Udev Sync Supported: true
Deferred Removal Enabled: false
Deferred Deletion Enabled: false
Deferred Deleted Device Count: 0
Data loop file: /var/lib/docker/devicemapper/devicemapper/data
Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata
Library Version: 1.02.107-RHEL7 (2015-12-01)
Execution Driver: native-0.2
Logging Driver: json-file
Plugins:
Volume: local
Network: null host bridge
Kernel Version: 3.10.0-327.el7.x86_64
Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo)
OSType: linux
Architecture: x86_64
CPUs: 1
Total Memory: 991.7 MiB
Name: ip-172-30-0-91.ec2.internal
ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S
Docker Root Dir: /var/lib/docker
Debug mode (client): false
Debug mode (server): false
Username: gordontheturtle
Registry: https://index.docker.io/v1/
Insecure registries:
myinsecurehost:5000
127.0.0.0/8
```
### Show debugging output
Here is a sample output for a daemon running on Ubuntu, using the overlay2
storage driver and a node that is part of a 2-node swarm:
```bash
$ docker -D info
Containers: 14
Running: 3
Paused: 1
Stopped: 10
Images: 52
Server Version: 1.13.0
Storage Driver: overlay2
Backing Filesystem: extfs
Supports d_type: true
Native Overlay Diff: false
Logging Driver: json-file
Cgroup Driver: cgroupfs
Plugins:
Volume: local
Network: bridge host macvlan null overlay
Swarm: active
NodeID: rdjq45w1op418waxlairloqbm
Is Manager: true
ClusterID: te8kdyw33n36fqiz74bfjeixd
Managers: 1
Nodes: 2
Orchestration:
Task History Retention Limit: 5
Raft:
Snapshot Interval: 10000
Number of Old Snapshots to Retain: 0
Heartbeat Tick: 1
Election Tick: 3
Dispatcher:
Heartbeat Period: 5 seconds
CA Configuration:
Expiry Duration: 3 months
Root Rotation In Progress: false
Node Address: 172.16.66.128 172.16.66.129
Manager Addresses:
172.16.66.128:2477
Runtimes: runc
Default Runtime: runc
Init Binary: docker-init
containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531
runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2
init version: N/A (expected: v0.13.0)
Security Options:
apparmor
seccomp
Profile: default
Kernel Version: 4.4.0-31-generic
Operating System: Ubuntu 16.04.1 LTS
OSType: linux
Architecture: x86_64
CPUs: 2
Total Memory: 1.937 GiB
Name: ubuntu
ID: H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326
Docker Root Dir: /var/lib/docker
Debug Mode (client): true
Debug Mode (server): true
File Descriptors: 30
Goroutines: 123
System Time: 2016-11-12T17:24:37.955404361-08:00
EventsListeners: 0
Http Proxy: http://test:test@proxy.example.com:8080
Https Proxy: https://test:test@proxy.example.com:8080
No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com
Registry: https://index.docker.io/v1/
WARNING: No swap limit support
Labels:
storage=ssd
staging=true
Experimental: false
Insecure Registries:
127.0.0.0/8
Registry Mirrors:
http://192.168.1.2/
http://registry-mirror.example.com:5000/
Live Restore Enabled: false
```
The global `-D` option causes all `docker` commands to output debug information.
### Format the output
You can also specify the output format:
```bash
$ docker info --format '{{json .}}'
{"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...}
```
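The same mechanism can extract a single field; for example, to print only the server version (the field name `ServerVersion` is the Go name for the `Server Version` line shown above):
```bash
$ docker info --format '{{.ServerVersion}}'
1.13.0
```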
### Run `docker info` on Windows
Here is a sample output for a daemon running on Windows Server 2016:
```none
E:\docker>docker info
Containers: 1
Running: 0
Paused: 0
Stopped: 1
Images: 17
Server Version: 1.13.0
Storage Driver: windowsfilter
Windows:
Logging Driver: json-file
Plugins:
Volume: local
Network: nat null overlay
Swarm: inactive
Default Isolation: process
Kernel Version: 10.0 14393 (14393.206.amd64fre.rs1_release.160912-1937)
Operating System: Windows Server 2016 Datacenter
OSType: windows
Architecture: x86_64
CPUs: 8
Total Memory: 3.999 GiB
Name: WIN-V0V70C0LU5P
ID: NYMS:B5VK:UMSL:FVDZ:EWB5:FKVK:LPFL:FJMQ:H6FT:BZJ6:L2TD:XH62
Docker Root Dir: C:\control
Debug Mode (client): false
Debug Mode (server): false
Registry: https://index.docker.io/v1/
Insecure Registries:
127.0.0.0/8
Registry Mirrors:
http://192.168.1.2/
http://registry-mirror.example.com:5000/
Live Restore Enabled: false
```

View File

@ -0,0 +1,122 @@
---
title: "inspect"
description: "The inspect command description and usage"
keywords: "inspect, container, json"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# inspect
```markdown
Usage: docker inspect [OPTIONS] NAME|ID [NAME|ID...]
Return low-level information on Docker object(s) (e.g. container, image, volume,
network, node, service, or task) identified by name or ID
Options:
-f, --format Format the output using the given Go template
--help Print usage
-s, --size Display total file sizes if the type is container
--type Return JSON for specified type
```
## Description
Docker inspect provides detailed information on constructs controlled by Docker.
By default, `docker inspect` will render results in a JSON array.
## Request a custom response format (--format)
If a format is specified, the given template will be executed for each result.
Go's [text/template](http://golang.org/pkg/text/template/) package
describes all the details of the format.
## Specify target type (--type)
`--type container|image|node|network|secret|service|volume|task|plugin`
The `docker inspect` command matches any type of object by either ID or name.
In some cases, multiple types of objects (for example, a container and a volume)
exist with the same name, making the result ambiguous.
To restrict `docker inspect` to a specific type of object, use the `--type`
option.
The following example inspects a _volume_ named "myvolume":
```bash
$ docker inspect --type=volume myvolume
```
## Examples
### Get an instance's IP address
For the most part, you can pick out any field from the JSON in a fairly
straightforward manner.
```bash
$ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $INSTANCE_ID
```
### Get an instance's MAC address
```bash
$ docker inspect --format='{{range .NetworkSettings.Networks}}{{.MacAddress}}{{end}}' $INSTANCE_ID
```
### Get an instance's log path
```bash
$ docker inspect --format='{{.LogPath}}' $INSTANCE_ID
```
### Get an instance's image name
```bash
$ docker inspect --format='{{.Config.Image}}' $INSTANCE_ID
```
### List all port bindings
You can loop over arrays and maps in the results to produce simple text
output:
```bash
$ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
```
### Find a specific port mapping
The `.Field` syntax doesn't work when the field name begins with a
number, but the template language's `index` function does. The
`.NetworkSettings.Ports` section contains a map of the internal port
mappings to a list of external address/port objects. To grab just the
numeric public port, you use `index` to find the specific port map, and
then `index` 0 contains the first object inside of that. Then we ask for
the `HostPort` field to get the public address.
```bash
$ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
```
### Get a subsection in JSON format
If you request a field which is itself a structure containing other
fields, by default you get a Go-style dump of the inner values.
Docker adds a template function, `json`, which can be applied to get
results in JSON format.
```bash
$ docker inspect --format='{{json .Config}}' $INSTANCE_ID
```

View File

@ -0,0 +1,35 @@
---
title: "kill"
description: "The kill command description and usage"
keywords: "container, kill, signal"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# kill
```markdown
Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...]
Kill one or more running containers
Options:
--help Print usage
-s, --signal string Signal to send to the container (default "KILL")
```
## Description
The main process inside the container will be sent `SIGKILL`, or any
signal specified with option `--signal`.
> **Note**: `ENTRYPOINT` and `CMD` in the *shell* form run as a subcommand of
> `/bin/sh -c`, which does not pass signals. This means that the executable is
> not the container's PID 1 and does not receive Unix signals.
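For example, to send `SIGHUP` instead of the default `SIGKILL` (the container name is illustrative):
```bash
$ docker kill --signal=SIGHUP my_container
```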

View File

@ -0,0 +1,62 @@
---
title: "load"
description: "The load command description and usage"
keywords: "stdin, tarred, repository"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# load
```markdown
Usage: docker load [OPTIONS]
Load an image from a tar archive or STDIN
Options:
--help Print usage
-i, --input string Read from tar archive file, instead of STDIN.
The tarball may be compressed with gzip, bzip, or xz
-q, --quiet Suppress the load output but still outputs the imported images
```
## Description
`docker load` loads a tarred repository from a file or the standard input stream.
It restores both images and tags.
## Examples
```bash
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
$ docker load < busybox.tar.gz
Loaded image: busybox:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
busybox latest 769b9341d937 7 weeks ago 2.489 MB
$ docker load --input fedora.tar
Loaded image: fedora:rawhide
Loaded image: fedora:20
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
busybox latest 769b9341d937 7 weeks ago 2.489 MB
fedora rawhide 0d20aec6529d 7 weeks ago 387 MB
fedora 20 58394af37342 7 weeks ago 385.5 MB
fedora heisenbug 58394af37342 7 weeks ago 385.5 MB
fedora latest 58394af37342 7 weeks ago 385.5 MB
```

View File

@ -0,0 +1,158 @@
---
title: "login"
description: "The login command description and usage"
keywords: "registry, login, image"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# login
```markdown
Usage: docker login [OPTIONS] [SERVER]
Log in to a Docker registry.
If no server is specified, the default is defined by the daemon.
Options:
--help Print usage
-p, --password string Password
-u, --username string Username
```
## Description
Log in to a registry.
### Log in to a self-hosted registry
If you want to log in to a self-hosted registry, you can specify this by
adding the server name.
```bash
$ docker login localhost:8080
```
### Privileged user requirement
`docker login` requires you to use `sudo` or to be `root`, except when:
1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`.
2. your user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/security/security/#docker-daemon-attack-surface) for details.
You can log into any public or private repository for which you have
credentials. When you log in, the command stores encoded credentials in
`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows.
### Credentials store
The Docker Engine can keep user credentials in an external credentials store,
such as the native keychain of the operating system. Using an external store
is more secure than storing credentials in the Docker configuration file.
To use a credentials store, you need an external helper program to interact
with a specific keychain or external store. Docker requires the helper
program to be in the client's host `$PATH`.
This is the list of currently available credential helpers and where
you can download them from:
- D-Bus Secret Service: https://github.com/docker/docker-credential-helpers/releases
- Apple macOS keychain: https://github.com/docker/docker-credential-helpers/releases
- Microsoft Windows Credential Manager: https://github.com/docker/docker-credential-helpers/releases
You need to specify the credentials store in `$HOME/.docker/config.json`
to tell the docker engine to use it. The value of the config property should be
the suffix of the program to use (i.e. everything after `docker-credential-`).
For example, to use `docker-credential-osxkeychain`:
```json
{
"credsStore": "osxkeychain"
}
```
If you are currently logged in, run `docker logout` to remove
the credentials from the file and run `docker login` again.
### Credential helper protocol
Credential helpers can be any program or script that follows a very simple protocol.
This protocol is heavily inspired by Git, but it differs in the information shared.
The helpers always use the first argument in the command to identify the action.
There are only three possible values for that argument: `store`, `get`, and `erase`.
The `store` command takes a JSON payload from the standard input. That payload carries
the server address (to identify the credential), the user name, and either a password
or an identity token.
```json
{
"ServerURL": "https://index.docker.io/v1",
"Username": "david",
"Secret": "passw0rd1"
}
```
If the secret being stored is an identity token, the Username should be set to
`<token>`.
The `store` command can write error messages to `STDOUT` that the docker engine
will show if there was an issue.
The `get` command takes a string payload from the standard input. That payload carries
the server address that the docker engine needs credentials for. This is
an example of that payload: `https://index.docker.io/v1`.
The `get` command writes a JSON payload to `STDOUT`. Docker reads the user name
and password from this payload:
```json
{
"Username": "david",
"Secret": "passw0rd1"
}
```
The `erase` command takes a string payload from `STDIN`. That payload carries
the server address that the docker engine wants to remove credentials for. This is
an example of that payload: `https://index.docker.io/v1`.
The `erase` command can write error messages to `STDOUT` that the docker engine
will show if there was an issue.
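Because a helper is simply an executable named `docker-credential-<suffix>` that reads its payload from standard input, the protocol can be exercised by hand. A sketch, assuming the `osxkeychain` helper from the earlier example is installed and holds the illustrative credentials shown above:
```bash
$ echo "https://index.docker.io/v1" | docker-credential-osxkeychain get
{"Username":"david","Secret":"passw0rd1"}
```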
### Credential helpers
Credential helpers are similar to the credential store above, but act as the
designated programs to handle credentials for *specific registries*. The default
credential store (`credsStore` or the config file itself) will not be used for
operations concerning credentials of the specified registries.
### Logging out
If you are currently logged in, run `docker logout` to remove
the credentials from the default store.
Credential helpers are specified in a similar way to `credsStore`, but
allow for multiple helpers to be configured at a time. Keys specify the
registry domain, and values specify the suffix of the program to use
(i.e. everything after `docker-credential-`).
For example:
```json
{
"credHelpers": {
"registry.example.com": "registryhelper",
"awesomereg.example.org": "hip-star",
"unicorn.example.io": "vcbait"
}
}
```

View File

@ -0,0 +1,32 @@
---
title: "logout"
description: "The logout command description and usage"
keywords: "logout, docker, registry"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# logout
```markdown
Usage: docker logout [SERVER]
Log out from a Docker registry.
If no server is specified, the default is defined by the daemon.
Options:
--help Print usage
```
## Examples
```bash
$ docker logout localhost:8080
```

View File

@ -0,0 +1,68 @@
---
title: "logs"
description: "The logs command description and usage"
keywords: "logs, retrieve, docker"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# logs
```markdown
Usage: docker logs [OPTIONS] CONTAINER
Fetch the logs of a container
Options:
--details Show extra details provided to logs
-f, --follow Follow log output
--help Print usage
--since string Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)
--tail string Number of lines to show from the end of the logs (default "all")
-t, --timestamps Show timestamps
```
## Description
The `docker logs` command batch-retrieves logs present at the time of execution.
> **Note**: this command is only functional for containers that are started with
> the `json-file` or `journald` logging driver.
For more information about selecting and configuring logging drivers, refer to
[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/).
The `docker logs --follow` command will continue streaming the new output from
the container's `STDOUT` and `STDERR`.
Passing a negative number or a non-integer to `--tail` is invalid and the
value is set to `all` in that case.
The `docker logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants),
for example `2014-09-16T06:17:46.000000000Z`, to each
log entry. To ensure that the timestamps are aligned, the
nanosecond part of the timestamp will be padded with zeros when necessary.
The `docker logs --details` command will add on extra attributes, such as
environment variables and labels, provided to `--log-opt` when creating the
container.
The `--since` option shows only the container logs generated after
a given date. You can specify the date as an RFC 3339 date, a UNIX
timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date
format you may also use RFC3339Nano, `2006-01-02T15:04:05`,
`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
timezone on the client will be used if you do not provide either a `Z` or a
`+-00:00` timezone offset at the end of the timestamp. When providing Unix
timestamps enter seconds[.nanoseconds], where seconds is the number of seconds
that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
fraction of a second no more than nine digits long. You can combine the
`--since` option with either or both of the `--follow` or `--tail` options.
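For example, to follow new output while starting from at most the last 100 log lines produced in the past 30 minutes (the container name is illustrative):
```bash
$ docker logs --since 30m --tail 100 --follow my_container
```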

View File

@ -0,0 +1,51 @@
---
title: "network"
description: "The network command description and usage"
keywords: "network"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# network
```markdown
Usage: docker network COMMAND
Manage networks
Options:
--help Print usage
Commands:
connect Connect a container to a network
create Create a network
disconnect Disconnect a container from a network
inspect Display detailed information on one or more networks
ls List networks
prune Remove all unused networks
rm Remove one or more networks
Run 'docker network COMMAND --help' for more information on a command.
```
## Description
Manage networks. You can use subcommands to create, inspect, list, remove,
prune, connect, and disconnect networks.
## Related commands
* [network create](network_create.md)
* [network inspect](network_inspect.md)
* [network list](network_list.md)
* [network rm](network_rm.md)
* [network prune](network_prune.md)
* [network connect](network_connect.md)
* [network disconnect](network_disconnect.md)

View File

@ -0,0 +1,117 @@
---
title: "network connect"
description: "The network connect command description and usage"
keywords: "network, connect, user-defined"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# network connect
```markdown
Usage: docker network connect [OPTIONS] NETWORK CONTAINER
Connect a container to a network
Options:
--alias value Add network-scoped alias for the container (default [])
--help Print usage
--ip string IPv4 address (e.g., 172.30.100.104)
--ip6 string IPv6 address (e.g., 2001:db8::33)
--link value Add link to another container (default [])
--link-local-ip value Add a link-local address for the container (default [])
```
## Description
Connects a container to a network. You can connect a container by name
or by ID. Once connected, the container can communicate with other containers in
the same network.
## Examples
### Connect a running container to a network
```bash
$ docker network connect multi-host-network container1
```
### Connect a container to a network when it starts
You can also use the `docker run --network=<network-name>` option to start a container and immediately connect it to a network.
```bash
$ docker run -itd --network=multi-host-network busybox
```
### Specify the IP address a container will use on a given network
You can specify the IP address you want to be assigned to the container's interface.
```bash
$ docker network connect --ip 10.10.36.122 multi-host-network container2
```
### Use the legacy `--link` option
You can use the `--link` option to link another container with a preferred alias.
```bash
$ docker network connect --link container1:c1 multi-host-network container2
```
### Create a network alias for a container
The `--alias` option can be used to resolve the container by another name in the network
being connected to.
```bash
$ docker network connect --alias db --alias mysql multi-host-network container2
```
### Network implications of stopping, pausing, or restarting containers
You can pause, restart, and stop containers that are connected to a network.
A container connects to its configured networks when it runs.
If specified, the container's IP address(es) are reapplied when a stopped
container is restarted. If the IP address is no longer available, the container
fails to start. One way to guarantee that the IP address is available is
to specify an `--ip-range` when creating the network, and choose the static IP
address(es) from outside that range. This ensures that the IP address is not
given to another container while this container is not on the network.
```bash
$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network
```
```bash
$ docker network connect --ip 172.20.128.2 multi-host-network container2
```
To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network.
Once connected to a network, containers can communicate using only another
container's IP address or name. For `overlay` networks or custom plugins that
support multi-host connectivity, containers connected to the same multi-host
network but launched from different Engines can also communicate in this way.
You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container to both bridge and overlay networks.
## Related commands
* [network inspect](network_inspect.md)
* [network create](network_create.md)
* [network disconnect](network_disconnect.md)
* [network ls](network_ls.md)
* [network rm](network_rm.md)
* [network prune](network_prune.md)
* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/)
* [Work with networks](https://docs.docker.com/engine/userguide/networking/work-with-networks/)

View File

@ -0,0 +1,227 @@
---
title: "network create"
description: "The network create command description and usage"
keywords: "network, create"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# network create
```markdown
Usage: docker network create [OPTIONS] NETWORK
Create a network
Options:
--attachable Enable manual container attachment
--ingress Specify the network provides the routing-mesh
--aux-address value Auxiliary IPv4 or IPv6 addresses used by Network
driver (default map[])
-d, --driver string Driver to manage the Network (default "bridge")
--gateway value IPv4 or IPv6 Gateway for the master subnet (default [])
--help Print usage
--internal Restrict external access to the network
--ip-range value Allocate container ip from a sub-range (default [])
--ipam-driver string IP Address Management Driver (default "default")
--ipam-opt value Set IPAM driver specific options (default map[])
--ipv6 Enable IPv6 networking
--label value Set metadata on a network (default [])
-o, --opt value Set driver specific options (default map[])
--subnet value Subnet in CIDR format that represents a
network segment (default [])
--scope value Promote a network to swarm scope (value = [ local | swarm ])
--config-only Creates a configuration only network
--config-from The name of the network from which copying the configuration
```
## Description
Creates a new network. The `DRIVER` accepts `bridge` or `overlay`, which are the
built-in network drivers. If you have installed a third-party or your own custom
network driver, you can specify that `DRIVER` here also. If you don't specify the
`--driver` option, the command automatically creates a `bridge` network for you.
When you install Docker Engine it creates a `bridge` network automatically. This
network corresponds to the `docker0` bridge that Engine has traditionally relied
on. When you launch a new container with `docker run` it automatically connects to
this bridge network. You cannot remove this default bridge network, but you can
create new ones using the `network create` command.
```bash
$ docker network create -d bridge my-bridge-network
```
Bridge networks are isolated networks on a single Engine installation. If you
want to create a network that spans multiple Docker hosts each running an
Engine, you must create an `overlay` network. Unlike `bridge` networks, overlay
networks require some pre-existing conditions before you can create one. These
conditions are:
* Access to a key-value store. Engine supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores.
* A cluster of hosts with connectivity to the key-value store.
* A properly configured Engine `daemon` on each host in the cluster.
The `dockerd` options that support the `overlay` network are:
* `--cluster-store`
* `--cluster-store-opt`
* `--cluster-advertise`
To read more about these options and how to configure them, see ["*Get started
with multi-host network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay).
While not required, it is a good idea to install Docker Swarm to
manage the cluster that makes up your network. Swarm provides sophisticated
discovery and server management tools that can assist your implementation.
Once you have prepared the `overlay` network prerequisites, you simply choose a
Docker host in the cluster and issue the following to create the network:
```bash
$ docker network create -d overlay my-multihost-network
```
Network names must be unique. The Docker daemon attempts to identify naming
conflicts but this is not guaranteed. It is the user's responsibility to avoid
name conflicts.
## Examples
### Connect containers
When you start a container, use the `--network` flag to connect it to a network.
This example adds the `busybox` container to the `mynet` network:
```bash
$ docker run -itd --network=mynet busybox
```
If you want to add a container to a network after the container is already
running, use the `docker network connect` subcommand.
You can connect multiple containers to the same network. Once connected, the
containers can communicate using only another container's IP address or name.
For `overlay` networks or custom plugins that support multi-host connectivity,
containers connected to the same multi-host network but launched from different
Engines can also communicate in this way.
You can disconnect a container from a network using the `docker network
disconnect` command.
### Specify advanced options
When you create a network, Engine creates a non-overlapping subnetwork for the
network by default. This subnetwork is not a subdivision of an existing
network. It is purely for IP-addressing purposes. You can override this default
and specify subnetwork values directly using the `--subnet` option. On a
`bridge` network you can only create a single subnet:
```bash
$ docker network create --driver=bridge --subnet=192.168.0.0/16 br0
```
Additionally, you can also specify the `--gateway`, `--ip-range`, and `--aux-address`
options.
```bash
$ docker network create \
--driver=bridge \
--subnet=172.28.0.0/16 \
--ip-range=172.28.5.0/24 \
--gateway=172.28.5.254 \
br0
```
If you omit the `--gateway` flag, the Engine selects one for you from inside a
preferred pool. For `overlay` networks, and for network driver plugins that
support it, you can create multiple subnetworks.
```bash
$ docker network create -d overlay \
--subnet=192.168.0.0/16 \
--subnet=192.170.0.0/16 \
--gateway=192.168.0.100 \
--gateway=192.170.0.100 \
--ip-range=192.168.1.0/24 \
--aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \
--aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \
my-multihost-network
```
Be sure that your subnetworks do not overlap. If they do, the network create
fails and Engine returns an error.
### Bridge driver options
When creating a custom network, the default network driver (i.e. `bridge`) has
additional options that can be passed. The following are those options and the
equivalent docker daemon flags used for the docker0 bridge:
| Option | Equivalent | Description |
|--------------------------------------------------|-------------|-------------------------------------------------------|
| `com.docker.network.bridge.name` | - | bridge name to be used when creating the Linux bridge |
| `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading |
| `com.docker.network.bridge.enable_icc` | `--icc` | Enable or Disable Inter Container Connectivity |
| `com.docker.network.bridge.host_binding_ipv4` | `--ip` | Default IP when binding container ports |
| `com.docker.network.driver.mtu` | `--mtu` | Set the containers network MTU |
The following arguments can be passed to `docker network create` for any
network driver, again with their approximate equivalents to `docker daemon`.
| Argument | Equivalent | Description |
|--------------|----------------|--------------------------------------------|
| `--gateway` | - | IPv4 or IPv6 Gateway for the master subnet |
| `--ip-range` | `--fixed-cidr` | Allocate IPs from a range |
| `--internal` | - | Restrict external access to the network |
| `--ipv6` | `--ipv6` | Enable IPv6 networking |
| `--subnet` | `--bip` | Subnet for network |
For example, let's use the `-o` or `--opt` option to specify an IP address binding
when publishing ports:
```bash
$ docker network create \
-o "com.docker.network.bridge.host_binding_ipv4"="172.19.0.1" \
simple-network
```
### Network internal mode
By default, when you connect a container to an `overlay` network, Docker also
connects a bridge network to it to provide external connectivity. If you want
to create an externally isolated `overlay` network, you can specify the
`--internal` option.
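A minimal sketch, assuming the overlay prerequisites above are in place and using a hypothetical network name:
```bash
$ docker network create --driver overlay --internal my-internal-net
```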
### Network ingress mode
You can create the network which will be used to provide the routing-mesh in the
swarm cluster. You do so by specifying `--ingress` when creating the network. Only
one ingress network can be created at a time. The network can be removed only
if no services depend on it. Any option available when creating an overlay network
is also available when creating the ingress network, except for the `--attachable` option.
```bash
$ docker network create -d overlay \
--subnet=10.11.0.0/16 \
--ingress \
--opt com.docker.network.mtu=9216 \
--opt encrypted=true \
my-ingress-network
```
## Related commands
* [network inspect](network_inspect.md)
* [network connect](network_connect.md)
* [network disconnect](network_disconnect.md)
* [network ls](network_ls.md)
* [network rm](network_rm.md)
* [network prune](network_prune.md)
* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/)

View File

@ -0,0 +1,48 @@
---
title: "network disconnect"
description: "The network disconnect command description and usage"
keywords: "network, disconnect, user-defined"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# network disconnect
```markdown
Usage: docker network disconnect [OPTIONS] NETWORK CONTAINER
Disconnect a container from a network
Options:
-f, --force Force the container to disconnect from a network
--help Print usage
```
## Description
Disconnects a container from a network. The container must be running to
disconnect it from the network.
## Examples
```bash
$ docker network disconnect multi-host-network container1
```
## Related commands
* [network inspect](network_inspect.md)
* [network connect](network_connect.md)
* [network create](network_create.md)
* [network ls](network_ls.md)
* [network rm](network_rm.md)
* [network prune](network_prune.md)
* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/)

View File

@ -0,0 +1,307 @@
---
title: "network inspect"
description: "The network inspect command description and usage"
keywords: "network, inspect, user-defined"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# network inspect
```markdown
Usage: docker network inspect [OPTIONS] NETWORK [NETWORK...]
Display detailed information on one or more networks
Options:
-f, --format string Format the output using the given Go template
--help Print usage
```
## Description
Returns information about one or more networks. By default, this command renders
all results in a JSON array.
## Examples
### Inspect the `bridge` network
Connect two containers to the default `bridge` network:
```bash
$ sudo docker run -itd --name=container1 busybox
f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27
$ sudo docker run -itd --name=container2 busybox
bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727
```
The `network inspect` command shows the containers, by ID, in its
results. For networks backed by a multi-host network driver, such as `overlay`,
this command also shows the container endpoints on other hosts in the
cluster. These endpoints are represented as "ep-{endpoint-id}" in the output.
However, for swarm mode networks, only the endpoints that are local to the
node are shown.
You can specify an alternate format to execute a given
template for each result. Go's
[text/template](http://golang.org/pkg/text/template/) package describes all the
details of the format.
```none
$ sudo docker network inspect bridge
[
{
"Name": "bridge",
"Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f",
"Created": "2016-10-19T04:33:30.360899459Z",
"Scope": "local",
"Driver": "bridge",
"IPAM": {
"Driver": "default",
"Config": [
{
"Subnet": "172.17.42.1/16",
"Gateway": "172.17.42.1"
}
]
},
"Internal": false,
"Containers": {
"bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": {
"Name": "container2",
"EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019",
"MacAddress": "02:42:ac:11:00:02",
"IPv4Address": "172.17.0.2/16",
"IPv6Address": ""
},
"f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": {
"Name": "container1",
"EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad",
"MacAddress": "02:42:ac:11:00:01",
"IPv4Address": "172.17.0.1/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.bridge.default_bridge": "true",
"com.docker.network.bridge.enable_icc": "true",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "docker0",
"com.docker.network.driver.mtu": "1500"
},
"Labels": {}
}
]
```
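As a brief sketch of the `--format` option described above, the following template prints only the subnet of the `bridge` network (the output is illustrative, taken from the example above):
```bash
$ docker network inspect --format '{{range .IPAM.Config}}{{.Subnet}}{{end}}' bridge
172.17.42.1/16
```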
### Inspect a user-defined network
Create and inspect a user-defined network:
```bash
$ docker network create simple-network
69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a
```
```none
$ docker network inspect simple-network
[
{
"Name": "simple-network",
"Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a",
"Created": "2016-10-19T04:33:30.360899459Z",
"Scope": "local",
"Driver": "bridge",
"IPAM": {
"Driver": "default",
"Config": [
{
"Subnet": "172.22.0.0/16",
"Gateway": "172.22.0.1"
}
]
},
"Containers": {},
"Options": {},
"Labels": {}
}
]
```
### Inspect the `ingress` network
For swarm mode overlay networks, `network inspect` also shows the IP address and node name
of the peers. Peers are the nodes in the swarm cluster which have at least one task attached
to the network. The node name is of the format `<hostname>-<unique ID>`.
```none
$ docker network inspect ingress
[
{
"Name": "ingress",
"Id": "j0izitrut30h975vk4m1u5kk3",
"Created": "2016-11-08T06:49:59.803387552Z",
"Scope": "swarm",
"Driver": "overlay",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "10.255.0.0/16",
"Gateway": "10.255.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Containers": {
"ingress-sbox": {
"Name": "ingress-endpoint",
"EndpointID": "40e002d27b7e5d75f60bc72199d8cae3344e1896abec5eddae9743755fe09115",
"MacAddress": "02:42:0a:ff:00:03",
"IPv4Address": "10.255.0.3/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.driver.overlay.vxlanid_list": "256"
},
"Labels": {},
"Peers": [
{
"Name": "net-1-1d22adfe4d5c",
"IP": "192.168.33.11"
},
{
"Name": "net-2-d55d838b34af",
"IP": "192.168.33.12"
},
{
"Name": "net-3-8473f8140bd9",
"IP": "192.168.33.13"
}
]
}
]
```
### Using the `--verbose` option for `network inspect`
`docker network inspect --verbose` for swarm mode overlay networks shows service-specific
details such as the service's VIP and port mappings. It also shows IPs of service tasks,
and the IPs of the nodes where the tasks are running.
The following is example output for an overlay network `ov1` that has one service `s1`
attached to it. Service `s1` in this case has three replicas.
```bash
$ docker network inspect --verbose ov1
[
{
"Name": "ov1",
"Id": "ybmyjvao9vtzy3oorxbssj13b",
"Created": "2017-03-13T17:04:39.776106792Z",
"Scope": "swarm",
"Driver": "overlay",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "10.0.0.0/24",
"Gateway": "10.0.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Containers": {
"020403bd88a15f60747fd25d1ad5fa1272eb740e8a97fc547d8ad07b2f721c5e": {
"Name": "s1.1.pjn2ik0sfgkfzed3h0s00gs9o",
"EndpointID": "ad16946f416562d658f3bb30b9830d73ad91ccf6feae44411269cd0ff674714e",
"MacAddress": "02:42:0a:00:00:04",
"IPv4Address": "10.0.0.4/24",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.driver.overlay.vxlanid_list": "4097"
},
"Labels": {},
"Peers": [
{
"Name": "net-3-5d3cfd30a58c",
"IP": "192.168.33.13"
},
{
"Name": "net-1-6ecbc0040a73",
"IP": "192.168.33.11"
},
{
"Name": "net-2-fb80208efd75",
"IP": "192.168.33.12"
}
],
"Services": {
"s1": {
"VIP": "10.0.0.2",
"Ports": [],
"LocalLBIndex": 257,
"Tasks": [
{
"Name": "s1.2.q4hcq2aiiml25ubtrtg4q1txt",
"EndpointID": "040879b027e55fb658e8b60ae3b87c6cdac7d291e86a190a3b5ac6567b26511a",
"EndpointIP": "10.0.0.5",
"Info": {
"Host IP": "192.168.33.11"
}
},
{
"Name": "s1.3.yawl4cgkp7imkfx469kn9j6lm",
"EndpointID": "106edff9f120efe44068b834e1cddb5b39dd4a3af70211378b2f7a9e562bbad8",
"EndpointIP": "10.0.0.3",
"Info": {
"Host IP": "192.168.33.12"
}
},
{
"Name": "s1.1.pjn2ik0sfgkfzed3h0s00gs9o",
"EndpointID": "ad16946f416562d658f3bb30b9830d73ad91ccf6feae44411269cd0ff674714e",
"EndpointIP": "10.0.0.4",
"Info": {
"Host IP": "192.168.33.13"
}
}
]
}
}
}
]
```
## Related commands
* [network disconnect ](network_disconnect.md)
* [network connect](network_connect.md)
* [network create](network_create.md)
* [network ls](network_ls.md)
* [network rm](network_rm.md)
* [network prune](network_prune.md)
* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/)

View File

@ -0,0 +1,250 @@
---
title: "network ls"
description: "The network ls command description and usage"
keywords: "network, list, user-defined"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# docker network ls
```markdown
Usage: docker network ls [OPTIONS]
List networks
Aliases:
ls, list
Options:
-f, --filter filter Provide filter values (e.g. 'driver=bridge')
--format string Pretty-print networks using a Go template
--help Print usage
--no-trunc Do not truncate the output
-q, --quiet Only display network IDs
```
## Description
Lists all the networks the Engine `daemon` knows about. This includes the
networks that span across multiple hosts in a cluster.
## Examples
### List all networks
```bash
$ sudo docker network ls
NETWORK ID NAME DRIVER SCOPE
7fca4eb8c647 bridge bridge local
9f904ee27bf5 none null local
cf03ee007fb4 host host local
78b03ee04fc4 multi-host overlay swarm
```
Use the `--no-trunc` option to display the full network id:
```bash
$ docker network ls --no-trunc
NETWORK ID NAME DRIVER SCOPE
18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null local
c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host local
7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge local
95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge local
63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge local
```
### Filtering
The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there
is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`).
Multiple filter flags are combined as an `OR` filter. For example,
`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks.
The currently supported filters are:
* driver
* id (network's id)
* label (`label=<key>` or `label=<key>=<value>`)
* name (network's name)
* scope (`swarm|global|local`)
* type (`custom|builtin`)
#### Driver
The `driver` filter matches networks based on their driver.
The following example matches networks with the `bridge` driver:
```bash
$ docker network ls --filter driver=bridge
NETWORK ID NAME DRIVER SCOPE
db9db329f835 test1 bridge local
f6e212da9dfd test2 bridge local
```
#### ID
The `id` filter matches on all or part of a network's ID.
The following filter matches all networks with an ID containing the
`63d1ff1f77b0...` string.
```bash
$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161
NETWORK ID NAME DRIVER SCOPE
63d1ff1f77b0 dev bridge local
```
You can also filter for a substring in an ID as this shows:
```bash
$ docker network ls --filter id=95e74588f40d
NETWORK ID NAME DRIVER SCOPE
95e74588f40d foo bridge local
$ docker network ls --filter id=95e
NETWORK ID NAME DRIVER SCOPE
95e74588f40d foo bridge local
```
#### Label
The `label` filter matches networks based on the presence of a `label` alone or a `label` and a
value.
The following filter matches networks with the `usage` label regardless of its value.
```bash
$ docker network ls -f "label=usage"
NETWORK ID NAME DRIVER SCOPE
db9db329f835 test1 bridge local
f6e212da9dfd test2 bridge local
```
The following filter matches networks with the `usage` label with the `prod` value.
```bash
$ docker network ls -f "label=usage=prod"
NETWORK ID NAME DRIVER SCOPE
f6e212da9dfd test2 bridge local
```
#### Name
The `name` filter matches on all or part of a network's name.
The following filter matches all networks with a name containing the `foobar` string.
```bash
$ docker network ls --filter name=foobar
NETWORK ID NAME DRIVER SCOPE
06e7eef0a170 foobar bridge local
```
You can also filter for a substring in a name as this shows:
```bash
$ docker network ls --filter name=foo
NETWORK ID NAME DRIVER SCOPE
95e74588f40d foo bridge local
06e7eef0a170 foobar bridge local
```
#### Scope
The `scope` filter matches networks based on their scope.
The following example matches networks with the `swarm` scope:
```bash
$ docker network ls --filter scope=swarm
NETWORK ID NAME DRIVER SCOPE
xbtm0v4f1lfh ingress overlay swarm
ic6r88twuu92 swarmnet overlay swarm
```
The following example matches networks with the `local` scope:
```bash
$ docker network ls --filter scope=local
NETWORK ID NAME DRIVER SCOPE
e85227439ac7 bridge bridge local
0ca0e19443ed host host local
ca13cc149a36 localnet bridge local
f9e115d2de35 none null local
```
#### Type
The `type` filter supports two values: `builtin` displays predefined networks
(`bridge`, `none`, `host`), whereas `custom` displays user-defined networks.
The following filter matches all user-defined networks:
```bash
$ docker network ls --filter type=custom
NETWORK ID NAME DRIVER SCOPE
95e74588f40d foo bridge local
63d1ff1f77b0 dev bridge local
```
This flag allows for batch cleanup. For example, use this filter
to delete all user-defined networks:
```bash
$ docker network rm `docker network ls --filter type=custom -q`
```
A warning will be issued when trying to remove a network that has containers
attached.
### Formatting
The formatting option (`--format`) pretty-prints networks output
using a Go template.
Valid placeholders for the Go template are listed below:
Placeholder | Description
-------------|------------------------------------------------------------------------------------------
`.ID` | Network ID
`.Name` | Network name
`.Driver` | Network driver
`.Scope` | Network scope (local, global)
`.IPv6` | Whether IPv6 is enabled on the network or not.
`.Internal` | Whether the network is internal or not.
`.Labels` | All labels assigned to the network.
`.Label` | Value of a specific label for this network. For example `{{.Label "project.version"}}`
`.CreatedAt` | Time when the network was created
When using the `--format` option, the `network ls` command will either
output the data exactly as the template declares or, when using the
`table` directive, include column headers as well.
The following example uses a template without headers and outputs the
`ID` and `Driver` entries separated by a colon for all networks:
```bash
$ docker network ls --format "{{.ID}}: {{.Driver}}"
afaaab448eb2: bridge
d1584f8dc718: host
391df270dc66: null
```
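As a sketch of the `table` directive, the following adds column headers (the output is illustrative, reusing the network IDs above):
```bash
$ docker network ls --format "table {{.ID}}\t{{.Name}}\t{{.Driver}}"
NETWORK ID          NAME                DRIVER
afaaab448eb2        bridge              bridge
d1584f8dc718        host                host
391df270dc66        none                null
```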
## Related commands
* [network disconnect ](network_disconnect.md)
* [network connect](network_connect.md)
* [network create](network_create.md)
* [network inspect](network_inspect.md)
* [network rm](network_rm.md)
* [network prune](network_prune.md)
* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/)

View File

@ -0,0 +1,104 @@
---
title: "network prune"
description: "Remove unused networks"
keywords: "network, prune, delete"
---
# network prune
```markdown
Usage: docker network prune [OPTIONS]
Remove all unused networks
Options:
--filter filter Provide filter values (e.g. 'until=<timestamp>')
-f, --force Do not prompt for confirmation
--help Print usage
```
## Description
Remove all unused networks. Unused networks are those which are not referenced
by any containers.
## Examples
```bash
$ docker network prune
WARNING! This will remove all networks not used by at least one container.
Are you sure you want to continue? [y/N] y
Deleted Networks:
n1
n2
```
### Filtering
The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
The currently supported filters are:
* until (`<timestamp>`) - only remove networks created before the given timestamp
* label (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) - only remove networks with (or without, in case `label!=...` is used) the specified labels.
The `until` filter can be Unix timestamps, date formatted
timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
relative to the daemon machine's time. Supported formats for date
formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
timezone on the daemon will be used if you do not provide either a `Z` or a
`+-00:00` timezone offset at the end of the timestamp. When providing Unix
timestamps enter seconds[.nanoseconds], where seconds is the number of seconds
that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
fraction of a second no more than nine digits long.
The `label` filter accepts two formats. One is the `label=...` (`label=<key>` or `label=<key>=<value>`),
which removes networks with the specified labels. The other
format is the `label!=...` (`label!=<key>` or `label!=<key>=<value>`), which removes
networks without the specified labels.
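For example, a sketch that removes unused networks carrying a hypothetical `usage=temp` label:
```bash
$ docker network prune --force --filter "label=usage=temp"
```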
The following removes networks created more than 5 minutes ago. Note that
system networks such as `bridge`, `host`, and `none` will never be pruned:
```none
$ docker network ls
NETWORK ID NAME DRIVER SCOPE
7430df902d7a bridge bridge local
ea92373fd499 foo-1-day-ago bridge local
ab53663ed3c7 foo-1-min-ago bridge local
97b91972bc3b host host local
f949d337b1f5 none null local
$ docker network prune --force --filter until=5m
Deleted Networks:
foo-1-day-ago
$ docker network ls
NETWORK ID NAME DRIVER SCOPE
7430df902d7a bridge bridge local
ab53663ed3c7 foo-1-min-ago bridge local
97b91972bc3b host host local
f949d337b1f5 none null local
```
## Related commands
* [network disconnect ](network_disconnect.md)
* [network connect](network_connect.md)
* [network create](network_create.md)
* [network ls](network_ls.md)
* [network inspect](network_inspect.md)
* [network rm](network_rm.md)
* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/)
* [system df](system_df.md)
* [container prune](container_prune.md)
* [image prune](image_prune.md)
* [volume prune](volume_prune.md)
* [system prune](system_prune.md)

View File

@ -0,0 +1,68 @@
---
title: "network rm"
description: "the network rm command description and usage"
keywords: "network, rm, user-defined"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# network rm
```markdown
Usage: docker network rm NETWORK [NETWORK...]
Remove one or more networks
Aliases:
rm, remove
Options:
--help Print usage
```
## Description
Removes one or more networks by name or identifier. To remove a network,
you must first disconnect any containers connected to it.
## Examples
### Remove a network
To remove the network named 'my-network':
```bash
$ docker network rm my-network
```
### Remove multiple networks
To delete multiple networks in a single `docker network rm` command, provide
multiple network names or ids. The following example deletes a network with id
`3695c422697f` and a network named `my-network`:
```bash
$ docker network rm 3695c422697f my-network
```
When you specify multiple networks, the command attempts to delete each in turn.
If the deletion of one network fails, the command continues to the next on the
list and tries to delete that. The command reports success or failure for each
deletion.
## Related commands
* [network disconnect ](network_disconnect.md)
* [network connect](network_connect.md)
* [network create](network_create.md)
* [network ls](network_ls.md)
* [network inspect](network_inspect.md)
* [network prune](network_prune.md)
* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/)

View File

@ -0,0 +1,42 @@
---
title: "node"
description: "The node command description and usage"
keywords: "node"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# node
```markdown
Usage: docker node COMMAND
Manage Swarm nodes
Options:
--help Print usage
Commands:
demote Demote one or more nodes from manager in the swarm
inspect Display detailed information on one or more nodes
ls List nodes in the swarm
promote Promote one or more nodes to manager in the swarm
ps List tasks running on one or more nodes, defaults to current node
rm Remove one or more nodes from the swarm
update Update a node
Run 'docker node COMMAND --help' for more information on a command.
```
## Description
Manage nodes.

View File

@ -0,0 +1,47 @@
---
title: "node demote"
description: "The node demote command description and usage"
keywords: "node, demote"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# node demote
```markdown
Usage: docker node demote NODE [NODE...]
Demote one or more nodes from manager in the swarm
Options:
--help Print usage
```
## Description
Demotes an existing manager so that it is no longer a manager. This command
targets a docker engine that is a manager in the swarm.
## Examples
```bash
$ docker node demote <node name>
```
## Related commands
* [node inspect](node_inspect.md)
* [node ls](node_ls.md)
* [node promote](node_promote.md)
* [node ps](node_ps.md)
* [node rm](node_rm.md)
* [node update](node_update.md)

View File

@ -0,0 +1,167 @@
---
title: "node inspect"
description: "The node inspect command description and usage"
keywords: "node, inspect"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# node inspect
```markdown
Usage: docker node inspect [OPTIONS] self|NODE [NODE...]
Display detailed information on one or more nodes
Options:
-f, --format string Format the output using the given Go template
--help Print usage
--pretty Print the information in a human friendly format
```
## Description
Returns information about a node. By default, this command renders all results
in a JSON array. You can specify an alternate format to execute a
given template for each result. Go's
[text/template](http://golang.org/pkg/text/template/) package describes all the
details of the format.
## Examples
### Inspect a node
```none
$ docker node inspect swarm-manager
[
{
"ID": "e216jshn25ckzbvmwlnh5jr3g",
"Version": {
"Index": 10
},
"CreatedAt": "2017-05-16T22:52:44.9910662Z",
"UpdatedAt": "2017-05-16T22:52:45.230878043Z",
"Spec": {
"Role": "manager",
"Availability": "active"
},
"Description": {
"Hostname": "swarm-manager",
"Platform": {
"Architecture": "x86_64",
"OS": "linux"
},
"Resources": {
"NanoCPUs": 1000000000,
"MemoryBytes": 1039843328
},
"Engine": {
"EngineVersion": "17.06.0-ce",
"Plugins": [
{
"Type": "Volume",
"Name": "local"
},
{
"Type": "Network",
"Name": "overlay"
},
{
"Type": "Network",
"Name": "null"
},
{
"Type": "Network",
"Name": "host"
},
{
"Type": "Network",
"Name": "bridge"
},
{
"Type": "Network",
"Name": "overlay"
}
]
},
"TLSInfo": {
"TrustRoot": "-----BEGIN CERTIFICATE-----\nMIIBazCCARCgAwIBAgIUOzgqU4tA2q5Yv1HnkzhSIwGyIBswCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTAyMDAyNDAwWhcNMzcwNDI3MDAy\nNDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABMbiAmET+HZyve35ujrnL2kOLBEQhFDZ5MhxAuYs96n796sFlfxTxC1lM/2g\nAh8DI34pm3JmHgZxeBPKUURJHKWjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBS3sjTJOcXdkls6WSY2rTx1KIJueTAKBggqhkjO\nPQQDAgNJADBGAiEAoeVWkaXgSUAucQmZ3Yhmx22N/cq1EPBgYHOBZmHt0NkCIQC3\nzONcJ/+WA21OXtb+vcijpUOXtNjyHfcox0N8wsLDqQ==\n-----END CERTIFICATE-----\n",
"CertIssuerSubject": "MBMxETAPBgNVBAMTCHN3YXJtLWNh",
"CertIssuerPublicKey": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExuICYRP4dnK97fm6OucvaQ4sERCEUNnkyHEC5iz3qfv3qwWV/FPELWUz/aACHwMjfimbcmYeBnF4E8pRREkcpQ=="
}
},
"Status": {
"State": "ready",
"Addr": "168.0.32.137"
},
"ManagerStatus": {
"Leader": true,
"Reachability": "reachable",
"Addr": "168.0.32.137:2377"
}
}
]
```
### Specify an output format
```none
$ docker node inspect --format '{{ .ManagerStatus.Leader }}' self
false
$ docker node inspect --pretty self
ID: e216jshn25ckzbvmwlnh5jr3g
Hostname: swarm-manager
Joined at: 2017-05-16 22:52:44.9910662 +0000 utc
Status:
State: Ready
Availability: Active
Address: 172.17.0.2
Manager Status:
Address: 172.17.0.2:2377
Raft Status: Reachable
Leader: Yes
Platform:
Operating System: linux
Architecture: x86_64
Resources:
CPUs: 4
Memory: 7.704 GiB
Plugins:
Network: overlay, bridge, null, host, overlay
Volume: local
Engine Version: 17.06.0-ce
TLS Info:
TrustRoot:
-----BEGIN CERTIFICATE-----
MIIBazCCARCgAwIBAgIUOzgqU4tA2q5Yv1HnkzhSIwGyIBswCgYIKoZIzj0EAwIw
EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTAyMDAyNDAwWhcNMzcwNDI3MDAy
NDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
A0IABMbiAmET+HZyve35ujrnL2kOLBEQhFDZ5MhxAuYs96n796sFlfxTxC1lM/2g
Ah8DI34pm3JmHgZxeBPKUURJHKWjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
Af8EBTADAQH/MB0GA1UdDgQWBBS3sjTJOcXdkls6WSY2rTx1KIJueTAKBggqhkjO
PQQDAgNJADBGAiEAoeVWkaXgSUAucQmZ3Yhmx22N/cq1EPBgYHOBZmHt0NkCIQC3
zONcJ/+WA21OXtb+vcijpUOXtNjyHfcox0N8wsLDqQ==
-----END CERTIFICATE-----
Issuer Public Key: MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExuICYRP4dnK97fm6OucvaQ4sERCEUNnkyHEC5iz3qfv3qwWV/FPELWUz/aACHwMjfimbcmYeBnF4E8pRREkcpQ==
Issuer Subject: MBMxETAPBgNVBAMTCHN3YXJtLWNh
```
## Related commands
* [node demote](node_demote.md)
* [node ls](node_ls.md)
* [node promote](node_promote.md)
* [node ps](node_ps.md)
* [node rm](node_rm.md)
* [node update](node_update.md)

View File

@ -0,0 +1,171 @@
---
title: "node ls"
description: "The node ls command description and usage"
keywords: "node, list"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# node ls
```markdown
Usage: docker node ls [OPTIONS]
List nodes in the swarm
Aliases:
ls, list
Options:
-f, --filter filter Filter output based on conditions provided
--format string Pretty-print nodes using a Go template
--help Print usage
-q, --quiet Only display IDs
```
## Description
Lists all the nodes that the Docker Swarm manager knows about. You can filter
using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section
for more information about available filter options.
## Examples
```bash
$ docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active
38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active
e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader
```
> **Note**:
> In the above example output, there is a hidden column of `.Self` that indicates if the
> node is the same node as the current docker daemon. A `*` (e.g., `e216jshn25ckzbvmwlnh5jr3g *`)
> means this node is the current docker daemon.
### Filtering
The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
The currently supported filters are:
* [id](node_ls.md#id)
* [label](node_ls.md#label)
* [membership](node_ls.md#membership)
* [name](node_ls.md#name)
* [role](node_ls.md#role)
#### id
The `id` filter matches all or part of a node's id.
```bash
$ docker node ls -f id=1
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active
```
#### label
The `label` filter matches nodes based on engine labels and on the presence of a `label` alone or a `label` and a value. Node labels are currently not used for filtering.
The following filter matches nodes with the `foo` label regardless of its value.
```bash
$ docker node ls -f "label=foo"
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active
```
#### membership
The `membership` filter matches nodes based on the presence of a `membership` and a value
`accepted` or `pending`.
The following filter matches nodes with the `membership` of `accepted`.
```bash
$ docker node ls -f "membership=accepted"
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active
38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active
```
#### name
The `name` filter matches on all or part of a node hostname.
The following filter matches nodes with a name equal to the `swarm-manager1` string.
```bash
$ docker node ls -f name=swarm-manager1
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader
```
#### role
The `role` filter matches nodes based on the presence of a `role` and a value `worker` or `manager`.
The following filter matches nodes with the `manager` role.
```bash
$ docker node ls -f "role=manager"
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader
```
### Formatting
The formatting option (`--format`) pretty-prints nodes output
using a Go template.
Valid placeholders for the Go template are listed below:
Placeholder | Description
-----------------|------------------------------------------------------------------------------------------
`.ID` | Node ID
`.Self` | Node of the daemon (`true/false`; `true` indicates that the node is the same as the current docker daemon)
`.Hostname` | Node hostname
`.Status` | Node status
`.Availability` | Node availability ("active", "pause", or "drain")
`.ManagerStatus` | Manager status of the node
`.TLSStatus` | TLS status of the node ("Ready", or "Needs Rotation" if the TLS certificate is signed by an old CA)
When using the `--format` option, the `node ls` command will either
output the data exactly as the template declares or, when using the
`table` directive, include column headers as well.
The following example uses a template without headers and outputs the
`ID`, `Hostname`, and `TLS Status` entries separated by a colon for all nodes:
```bash
$ docker node ls --format "{{.ID}}: {{.Hostname}} {{.TLSStatus}}"
e216jshn25ckzbvmwlnh5jr3g: swarm-manager1 Ready
35o6tiywb700jesrt3dmllaza: swarm-worker1 Needs Rotation
```
## Related commands
* [node demote](node_demote.md)
* [node inspect](node_inspect.md)
* [node promote](node_promote.md)
* [node ps](node_ps.md)
* [node rm](node_rm.md)
* [node update](node_update.md)

View File

@ -0,0 +1,45 @@
---
title: "node promote"
description: "The node promote command description and usage"
keywords: "node, promote"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# node promote
```markdown
Usage: docker node promote NODE [NODE...]
Promote one or more nodes to manager in the swarm
Options:
--help Print usage
```
## Description
Promotes a node to manager. This command targets a docker engine that is a
manager in the swarm.
## Examples
```bash
$ docker node promote <node name>
```
## Related commands
* [node demote](node_demote.md)
* [node inspect](node_inspect.md)
* [node ls](node_ls.md)
* [node ps](node_ps.md)
* [node rm](node_rm.md)
* [node update](node_update.md)

View File

@ -0,0 +1,148 @@
---
title: "node ps"
description: "The node ps command description and usage"
keywords: node, tasks, ps
aliases: ["/engine/reference/commandline/node_tasks/"]
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# node ps
```markdown
Usage: docker node ps [OPTIONS] [NODE...]
List tasks running on one or more nodes, defaults to current node.
Options:
-f, --filter filter Filter output based on conditions provided
--format string Pretty-print tasks using a Go template
--help Print usage
--no-resolve Do not map IDs to Names
--no-trunc Do not truncate output
-q, --quiet Only display task IDs
```
## Description
Lists all the tasks on a Node that Docker knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options.
## Examples
```bash
$ docker node ps swarm-manager1
NAME IMAGE NODE DESIRED STATE CURRENT STATE
redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours
redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds
redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds
redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds
redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds
```
### Filtering
The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
The currently supported filters are:
* [name](#name)
* [id](#id)
* [label](#label)
* [desired-state](#desired-state)
#### name
The `name` filter matches on all or part of a task's name.
The following filter matches all tasks with a name containing the `redis` string.
```bash
$ docker node ps -f name=redis swarm-manager1
NAME IMAGE NODE DESIRED STATE CURRENT STATE
redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours
redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds
redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds
redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds
redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds
```
#### id
The `id` filter matches a task's id.
```bash
$ docker node ps -f id=bg8c07zzg87di2mufeq51a2qp swarm-manager1
NAME IMAGE NODE DESIRED STATE CURRENT STATE
redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds
```
#### label
The `label` filter matches tasks based on the presence of a `label` alone or a `label` and a
value.
The following filter matches tasks with the `usage` label regardless of its value.
```bash
$ docker node ps -f "label=usage"
NAME IMAGE NODE DESIRED STATE CURRENT STATE
redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 10 minutes
redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 9 minutes
```
#### desired-state
The `desired-state` filter can take the values `running`, `shutdown`, or `accepted`.
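For example, a sketch reusing the tasks from the output above:
```bash
$ docker node ps -f desired-state=running swarm-manager1
NAME                                IMAGE        NODE            DESIRED STATE  CURRENT STATE
redis.1.7q92v0nr1hcgts2amcjyqg3pq   redis:3.0.6  swarm-manager1  Running        Running 5 hours
```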
### Formatting
The formatting option (`--format`) pretty-prints tasks output
using a Go template.
Valid placeholders for the Go template are listed below:
Placeholder | Description
----------------|------------------------------------------------------------------------------------------
`.Name` | Task name
`.Image` | Task image
`.Node` | Node ID
`.DesiredState` | Desired state of the task (`running`, `shutdown`, or `accepted`)
`.CurrentState` | Current state of the task
`.Error` | Error
`.Ports` | Task published ports
When using the `--format` option, the `node ps` command will either
output the data exactly as the template declares or, when using the
`table` directive, include column headers as well.
The following example uses a template without headers and outputs the
`Name` and `Image` entries separated by a colon for all tasks:
```bash
$ docker node ps --format "{{.Name}}: {{.Image}}"
top.1: busybox
top.2: busybox
top.3: busybox
```
## Related commands
* [node demote](node_demote.md)
* [node inspect](node_inspect.md)
* [node ls](node_ls.md)
* [node promote](node_promote.md)
* [node rm](node_rm.md)
* [node update](node_update.md)

View File

@ -0,0 +1,80 @@
---
title: "node rm"
description: "The node rm command description and usage"
keywords: "node, remove"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# node rm
```markdown
Usage: docker node rm [OPTIONS] NODE [NODE...]
Remove one or more nodes from the swarm
Aliases:
rm, remove
Options:
-f, --force Force remove a node from the swarm
--help Print usage
```
## Description
When run from a manager node, this command removes the specified nodes from a swarm.
## Examples
### Remove a stopped node from the swarm
```bash
$ docker node rm swarm-node-02
Node swarm-node-02 removed from swarm
```
### Attempt to remove a running node from a swarm
Removes the specified nodes from the swarm, but only if the nodes are in the
down state. If you attempt to remove an active node you will receive an error:
```none
$ docker node rm swarm-node-03
Error response from daemon: rpc error: code = 9 desc = node swarm-node-03 is not
down and can't be removed
```
### Forcibly remove an inaccessible node from a swarm
If you lose access to a worker node or need to shut it down because it has been
compromised or is not behaving as expected, you can use the `--force` option.
This may cause transient errors or interruptions, depending on the type of task
being run on the node.
```bash
$ docker node rm --force swarm-node-03
Node swarm-node-03 removed from swarm
```
A manager node must be demoted to a worker node (using `docker node demote`)
before you can remove it from the swarm.
## Related commands
* [node demote](node_demote.md)
* [node inspect](node_inspect.md)
* [node ls](node_ls.md)
* [node promote](node_promote.md)
* [node ps](node_ps.md)
* [node update](node_update.md)

View File

@ -0,0 +1,77 @@
---
title: "node update"
description: "The node update command description and usage"
keywords: "resources, update, dynamically"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# node update
```markdown
Usage: docker node update [OPTIONS] NODE
Update a node
Options:
--availability string Availability of the node ("active"|"pause"|"drain")
--help Print usage
--label-add value Add or update a node label (key=value) (default [])
--label-rm value Remove a node label if exists (default [])
--role string Role of the node ("worker"|"manager")
```
## Description
Update metadata about a node, such as its availability, labels, or roles.
## Examples
### Add label metadata to a node
Add metadata to a swarm node using node labels. You can specify a node label as
a key with an empty value:
```bash
$ docker node update --label-add foo worker1
```
To add multiple labels to a node, pass the `--label-add` flag for each label:
```bash
$ docker node update --label-add foo --label-add bar worker1
```
When you [create a service](service_create.md),
you can use node labels as a constraint. A constraint limits the nodes where the
scheduler deploys tasks for a service.
For example, to add a `type` label to identify nodes where the scheduler should
deploy message queue service tasks:
```bash
$ docker node update --label-add type=queue worker1
```
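As a sketch, such a label can then be used as a placement constraint when creating a service (the service name and image are illustrative):
```bash
$ docker service create --name queue-worker --constraint 'node.labels.type==queue' busybox top
```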
The labels you set for nodes using `docker node update` apply only to the node
entity within the swarm. Do not confuse them with the docker daemon labels for
[dockerd](https://docs.docker.com/engine/userguide/labels-custom-metadata/#daemon-labels).
For more information about labels, refer to [apply custom
metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/).
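### Drain a node
As another sketch, the `--availability` flag can be used to drain a node so that the scheduler no longer assigns tasks to it (the node name `worker1` reuses the example above):
```bash
$ docker node update --availability drain worker1
```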
## Related commands
* [node demote](node_demote.md)
* [node inspect](node_inspect.md)
* [node ls](node_ls.md)
* [node promote](node_promote.md)
* [node ps](node_ps.md)
* [node rm](node_rm.md)

View File

@ -0,0 +1,48 @@
---
title: "pause"
description: "The pause command description and usage"
keywords: "cgroups, container, suspend, SIGSTOP"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# pause
```markdown
Usage: docker pause CONTAINER [CONTAINER...]
Pause all processes within one or more containers
Options:
--help Print usage
```
## Description
The `docker pause` command suspends all processes in the specified containers.
On Linux, this uses the cgroups freezer. Traditionally, when suspending a process,
the `SIGSTOP` signal is used, which is observable by the process being suspended.
With the cgroups freezer, the process is unaware that it is being suspended
(and subsequently resumed), and is unable to observe this. On Windows, only Hyper-V
containers can be paused.
See the
[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt)
for further details.
## Examples
```bash
$ docker pause my_container
```
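As a rough sketch of verifying and reversing the paused state, you can combine the `status` filter of [`docker ps`](ps.md) with [`docker unpause`](unpause.md):
```bash
# Confirm the container shows up as paused
$ docker ps --filter status=paused

# Resume all processes in the container
$ docker unpause my_container
```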
## Related commands
* [unpause](unpause.md)

View File

@ -0,0 +1,44 @@
---
title: "plugin"
description: "The plugin command description and usage"
keywords: "plugin"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# plugin
```markdown
Usage: docker plugin COMMAND
Manage plugins
Options:
--help Print usage
Commands:
create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
disable Disable a plugin
enable Enable a plugin
inspect Display detailed information on one or more plugins
install Install a plugin
ls List plugins
push Push a plugin to a registry
rm Remove one or more plugins
set Change settings for a plugin
upgrade Upgrade an existing plugin
Run 'docker plugin COMMAND --help' for more information on a command.
```
## Description
Manage plugins.

View File

@ -0,0 +1,66 @@
---
title: "plugin create"
description: "the plugin create command description and usage"
keywords: "plugin, create"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# plugin create
```markdown
Usage: docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR
Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
Options:
--compress Compress the context using gzip
--help Print usage
```
## Description
Creates a plugin. Before creating the plugin, prepare the plugin's root filesystem as well as
[the config.json](../../extend/config.md).
## Examples
The following example shows how to create a sample `plugin`.
```bash
$ ls -ls /home/pluginDir
total 4
4 -rw-r--r-- 1 root root 431 Nov 7 01:40 config.json
0 drwxr-xr-x 19 root root 420 Nov 7 01:40 rootfs
$ docker plugin create plugin /home/pluginDir
plugin
$ docker plugin ls
ID NAME TAG DESCRIPTION ENABLED
672d8144ec02 plugin latest A sample plugin for Docker false
```
The plugin can subsequently be enabled for local use or pushed to the public registry.
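A sketch of both follow-up steps, reusing the `plugin` name created above:
```bash
# Enable the plugin for local use
$ docker plugin enable plugin

# Or push it to a registry for distribution
# (a namespaced name such as user/plugin is normally required to push to Docker Hub)
$ docker plugin push plugin
```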
## Related commands
* [plugin disable](plugin_disable.md)
* [plugin enable](plugin_enable.md)
* [plugin inspect](plugin_inspect.md)
* [plugin install](plugin_install.md)
* [plugin ls](plugin_ls.md)
* [plugin push](plugin_push.md)
* [plugin rm](plugin_rm.md)
* [plugin set](plugin_set.md)
* [plugin upgrade](plugin_upgrade.md)

View File

@ -0,0 +1,69 @@
---
title: "plugin disable"
description: "the plugin disable command description and usage"
keywords: "plugin, disable"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# plugin disable
```markdown
Usage: docker plugin disable [OPTIONS] PLUGIN
Disable a plugin
Options:
-f, --force Force the disable of an active plugin
--help Print usage
```
## Description
Disables a plugin. The plugin must be installed before it can be disabled;
see [`docker plugin install`](plugin_install.md). Without the `-f` option,
a plugin that has references (e.g., volumes, networks) cannot be disabled.
## Examples
The following example shows that the `sample-volume-plugin` plugin is installed
and enabled:
```bash
$ docker plugin ls
ID NAME TAG DESCRIPTION ENABLED
69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true
```
To disable the plugin, use the following command:
```bash
$ docker plugin disable tiborvass/sample-volume-plugin
tiborvass/sample-volume-plugin
$ docker plugin ls
ID NAME TAG DESCRIPTION ENABLED
69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false
```
## Related commands
* [plugin create](plugin_create.md)
* [plugin enable](plugin_enable.md)
* [plugin inspect](plugin_inspect.md)
* [plugin install](plugin_install.md)
* [plugin ls](plugin_ls.md)
* [plugin push](plugin_push.md)
* [plugin rm](plugin_rm.md)
* [plugin set](plugin_set.md)
* [plugin upgrade](plugin_upgrade.md)

View File

@ -0,0 +1,68 @@
---
title: "plugin enable"
description: "the plugin enable command description and usage"
keywords: "plugin, enable"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# plugin enable
```markdown
Usage: docker plugin enable [OPTIONS] PLUGIN
Enable a plugin
Options:
--help Print usage
--timeout int HTTP client timeout (in seconds)
```
## Description
Enables a plugin. The plugin must be installed before it can be enabled;
see [`docker plugin install`](plugin_install.md).
## Examples
The following example shows that the `sample-volume-plugin` plugin is installed,
but disabled:
```bash
$ docker plugin ls
ID NAME TAG DESCRIPTION ENABLED
69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false
```
To enable the plugin, use the following command:
```bash
$ docker plugin enable tiborvass/sample-volume-plugin
tiborvass/sample-volume-plugin
$ docker plugin ls
ID NAME TAG DESCRIPTION ENABLED
69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true
```
## Related commands
* [plugin create](plugin_create.md)
* [plugin disable](plugin_disable.md)
* [plugin inspect](plugin_inspect.md)
* [plugin install](plugin_install.md)
* [plugin ls](plugin_ls.md)
* [plugin push](plugin_push.md)
* [plugin rm](plugin_rm.md)
* [plugin set](plugin_set.md)
* [plugin upgrade](plugin_upgrade.md)

View File

@ -0,0 +1,166 @@
---
title: "plugin inspect"
description: "The plugin inspect command description and usage"
keywords: "plugin, inspect"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# plugin inspect
```markdown
Usage: docker plugin inspect [OPTIONS] PLUGIN [PLUGIN...]
Display detailed information on one or more plugins
Options:
-f, --format string Format the output using the given Go template
--help Print usage
```
## Description
Returns information about a plugin. By default, this command renders all results
in a JSON array.
## Examples
```none
$ docker plugin inspect tiborvass/sample-volume-plugin:latest
{
"Id": "8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21",
"Name": "tiborvass/sample-volume-plugin:latest",
"PluginReference": "tiborvas/sample-volume-plugin:latest",
"Enabled": true,
"Config": {
"Mounts": [
{
"Name": "",
"Description": "",
"Settable": null,
"Source": "/data",
"Destination": "/data",
"Type": "bind",
"Options": [
"shared",
"rbind"
]
},
{
"Name": "",
"Description": "",
"Settable": null,
"Source": null,
"Destination": "/foobar",
"Type": "tmpfs",
"Options": null
}
],
"Env": [
"DEBUG=1"
],
"Args": null,
"Devices": null
},
"Manifest": {
"ManifestVersion": "v0",
"Description": "A test plugin for Docker",
"Documentation": "https://docs.docker.com/engine/extend/plugins/",
"Interface": {
"Types": [
"docker.volumedriver/1.0"
],
"Socket": "plugins.sock"
},
"Entrypoint": [
"plugin-sample-volume-plugin",
"/data"
],
"Workdir": "",
"User": {
},
"Network": {
"Type": "host"
},
"Capabilities": null,
"Mounts": [
{
"Name": "",
"Description": "",
"Settable": null,
"Source": "/data",
"Destination": "/data",
"Type": "bind",
"Options": [
"shared",
"rbind"
]
},
{
"Name": "",
"Description": "",
"Settable": null,
"Source": null,
"Destination": "/foobar",
"Type": "tmpfs",
"Options": null
}
],
"Devices": [
{
"Name": "device",
"Description": "a host device to mount",
"Settable": null,
"Path": "/dev/cpu_dma_latency"
}
],
"Env": [
{
"Name": "DEBUG",
"Description": "If set, prints debug messages",
"Settable": null,
"Value": "1"
}
],
"Args": {
"Name": "args",
"Description": "command line arguments",
"Settable": null,
"Value": [
]
}
}
}
```
(output formatted for readability)
### Formatting the output
```bash
$ docker plugin inspect -f '{{.Id}}' tiborvass/sample-volume-plugin:latest
8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21
```
## Related commands
* [plugin create](plugin_create.md)
* [plugin enable](plugin_enable.md)
* [plugin disable](plugin_disable.md)
* [plugin install](plugin_install.md)
* [plugin ls](plugin_ls.md)
* [plugin push](plugin_push.md)
* [plugin rm](plugin_rm.md)
* [plugin set](plugin_set.md)
* [plugin upgrade](plugin_upgrade.md)

View File

@ -0,0 +1,75 @@
---
title: "plugin install"
description: "the plugin install command description and usage"
keywords: "plugin, install"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# plugin install
```markdown
Usage: docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...]
Install a plugin
Options:
--alias string Local name for plugin
--disable Do not enable the plugin on install
--disable-content-trust Skip image verification (default true)
--grant-all-permissions Grant all permissions necessary to run the plugin
--help Print usage
```
## Description
Installs and enables a plugin. Docker looks first for the plugin on your Docker
host. If the plugin does not exist locally, it is pulled from the registry. Note
that the minimum required registry version to distribute plugins is 2.3.0.
## Examples
The following example installs the `vieux/sshfs` plugin and [sets](plugin_set.md) its
`DEBUG` environment variable to `1`. The install pulls the plugin from Docker
Hub, prompts the user to accept the list of privileges that the plugin needs,
sets the plugin's parameters, and enables the plugin.
```bash
$ docker plugin install vieux/sshfs DEBUG=1
Plugin "vieux/sshfs" is requesting the following privileges:
- network: [host]
- device: [/dev/fuse]
- capabilities: [CAP_SYS_ADMIN]
Do you grant the above permissions? [y/N] y
vieux/sshfs
```
After the plugin is installed, it appears in the list of plugins:
```bash
$ docker plugin ls
ID NAME TAG DESCRIPTION ENABLED
69553ca1d123 vieux/sshfs latest sshFS plugin for Docker true
```
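The `--alias` option shown in the usage above installs a plugin under a shorter local name. A minimal sketch, reusing the same plugin:
```bash
# Install the same plugin, but refer to it locally as "sshfs"
$ docker plugin install --alias sshfs vieux/sshfs
```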
## Related commands
* [plugin create](plugin_create.md)
* [plugin disable](plugin_disable.md)
* [plugin enable](plugin_enable.md)
* [plugin inspect](plugin_inspect.md)
* [plugin ls](plugin_ls.md)
* [plugin push](plugin_push.md)
* [plugin rm](plugin_rm.md)
* [plugin set](plugin_set.md)
* [plugin upgrade](plugin_upgrade.md)

View File

@ -0,0 +1,118 @@
---
title: "plugin ls"
description: "The plugin ls command description and usage"
keywords: "plugin, list"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# plugin ls
```markdown
Usage: docker plugin ls [OPTIONS]
List plugins
Aliases:
ls, list
Options:
-f, --filter filter Provide filter values (e.g. 'enabled=true')
--format string Pretty-print plugins using a Go template
--help Print usage
--no-trunc Don't truncate output
-q, --quiet Only display plugin IDs
```
## Description
Lists all the plugins that are currently installed. You can install plugins
using the [`docker plugin install`](plugin_install.md) command.
You can also filter using the `-f` or `--filter` flag.
Refer to the [filtering](#filtering) section for more information about available filter options.
## Examples
```bash
$ docker plugin ls
ID NAME TAG DESCRIPTION ENABLED
69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true
```
### Filtering
The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
than one filter, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
The currently supported filters are:
* enabled (boolean - true or false, 0 or 1)
* capability (string - currently `volumedriver`, `networkdriver`, `ipamdriver`, `logdriver`, `metricscollector`, or `authz`)
#### enabled
The `enabled` filter matches plugins that are enabled or disabled.
#### capability
The `capability` filter matches on plugin capabilities. One plugin
might have multiple capabilities. Currently `volumedriver`, `networkdriver`,
`ipamdriver`, `logdriver`, `metricscollector`, and `authz` are supported capabilities.
```bash
$ docker plugin install --disable tiborvass/no-remove
tiborvass/no-remove
$ docker plugin ls --filter enabled=true
NAME TAG DESCRIPTION ENABLED
```
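As a sketch, filters can also be combined with the `--format` placeholders described in the next section, for example to list only volume-driver plugins:
```bash
# Show the ID and name of plugins with the volumedriver capability
$ docker plugin ls --filter capability=volumedriver --format "table {{.ID}}\t{{.Name}}"
```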
### Formatting
The formatting option (`--format`) pretty-prints plugin output
using a Go template.
Valid placeholders for the Go template are listed below:
Placeholder | Description
---------------|------------------------------------------------------------------------------------------
`.ID` | Plugin ID
`.Name` | Plugin name
`.Description` | Plugin description
`.Enabled` | Whether plugin is enabled or not
`.PluginReference` | The reference used to push/pull from a registry
When using the `--format` option, the `plugin ls` command either
outputs the data exactly as the template declares or, when using the
`table` directive, includes column headers as well.
The following example uses a template without headers and outputs the
`ID` and `Name` entries separated by a colon for all plugins:
```bash
$ docker plugin ls --format "{{.ID}}: {{.Name}}"
4be01827a72e: tiborvass/no-remove
```
## Related commands
* [plugin create](plugin_create.md)
* [plugin disable](plugin_disable.md)
* [plugin enable](plugin_enable.md)
* [plugin inspect](plugin_inspect.md)
* [plugin install](plugin_install.md)
* [plugin push](plugin_push.md)
* [plugin rm](plugin_rm.md)
* [plugin set](plugin_set.md)
* [plugin upgrade](plugin_upgrade.md)

View File

@ -0,0 +1,57 @@
---
title: "plugin push"
description: "the plugin push command description and usage"
keywords: "plugin, push"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# plugin push
```markdown
Usage: docker plugin push [OPTIONS] PLUGIN[:TAG]
Push a plugin to a registry
Options:
--disable-content-trust Skip image signing (default true)
--help Print usage
```
## Description
After you have created a plugin using `docker plugin create` and the plugin is
ready for distribution, use `docker plugin push` to share it to Docker
Hub or a self-hosted registry.
Registry credentials are managed by [docker login](login.md).
## Examples
The following example shows how to push a sample `user/plugin`.
```bash
$ docker plugin ls
ID NAME TAG DESCRIPTION ENABLED
69553ca1d456 user/plugin latest A sample plugin for Docker false
$ docker plugin push user/plugin
```
## Related commands
* [plugin create](plugin_create.md)
* [plugin disable](plugin_disable.md)
* [plugin enable](plugin_enable.md)
* [plugin inspect](plugin_inspect.md)
* [plugin install](plugin_install.md)
* [plugin ls](plugin_ls.md)
* [plugin rm](plugin_rm.md)
* [plugin set](plugin_set.md)
* [plugin upgrade](plugin_upgrade.md)

View File

@ -0,0 +1,63 @@
---
title: "plugin rm"
description: "the plugin rm command description and usage"
keywords: "plugin, rm"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# plugin rm
```markdown
Usage: docker plugin rm [OPTIONS] PLUGIN [PLUGIN...]
Remove one or more plugins
Aliases:
rm, remove
Options:
-f, --force Force the removal of an active plugin
--help Print usage
```
## Description
Removes a plugin. You cannot remove a plugin while it is enabled; disable it
first using [`docker plugin disable`](plugin_disable.md) before removing it.
Alternatively, use `--force`; this is not recommended, since it can affect the
functioning of running containers that use the plugin.
## Examples
The following example disables and removes the `sample-volume-plugin:latest`
plugin:
```bash
$ docker plugin disable tiborvass/sample-volume-plugin
tiborvass/sample-volume-plugin
$ docker plugin rm tiborvass/sample-volume-plugin:latest
tiborvass/sample-volume-plugin
```
## Related commands
* [plugin create](plugin_create.md)
* [plugin disable](plugin_disable.md)
* [plugin enable](plugin_enable.md)
* [plugin inspect](plugin_inspect.md)
* [plugin install](plugin_install.md)
* [plugin ls](plugin_ls.md)
* [plugin push](plugin_push.md)
* [plugin set](plugin_set.md)
* [plugin upgrade](plugin_upgrade.md)

View File

@ -0,0 +1,172 @@
---
title: "plugin set"
description: "the plugin set command description and usage"
keywords: "plugin, set"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# plugin set
```markdown
Usage: docker plugin set PLUGIN KEY=VALUE [KEY=VALUE...]
Change settings for a plugin
Options:
--help Print usage
```
## Description
Change settings for a plugin. The plugin must be disabled.
The settings currently supported are:
* env variables
* source of mounts
* path of devices
* args
## What is settable?
To find out which fields are settable, look at the `Settable` field in the
plugin manifest.
Here is an extract of a plugin manifest:
```
{
"config": {
...
"args": {
"name": "myargs",
"settable": ["value"],
"value": ["foo", "bar"]
},
"env": [
{
"name": "DEBUG",
"settable": ["value"],
"value": "0"
},
{
"name": "LOGGING",
"value": "1"
}
],
"devices": [
{
"name": "mydevice",
"path": "/dev/foo",
"settable": ["path"]
}
],
"mounts": [
{
"destination": "/baz",
"name": "mymount",
"options": ["rbind"],
"settable": ["source"],
"source": "/foo",
"type": "bind"
}
],
...
}
}
```
In this example, the `value` of the `DEBUG` environment variable is settable, as are
the `source` of the `mymount` mount, the `path` of `mydevice`, and the `value` of `myargs`.
By contrast, the `LOGGING` environment variable has no settable field, which means a user cannot change it.
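As a sketch, the same information can be queried without reading the full manifest, using a Go template with [`docker plugin inspect`](plugin_inspect.md) against a hypothetical `myplugin` (the `.Config.Env` path appears in the inspect output):
```bash
# Show the env entries of the plugin config, including their Settable fields
$ docker plugin inspect -f '{{json .Config.Env}}' myplugin
```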
## Examples
### Change an environment variable
The following example changes the `DEBUG` environment variable on the
`sample-volume-plugin` plugin.
```bash
$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin
[DEBUG=0]
$ docker plugin set tiborvass/sample-volume-plugin DEBUG=1
$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin
[DEBUG=1]
```
### Change the source of a mount
The following example changes the source of the `mymount` mount on
the `myplugin` plugin.
```bash
$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin
/foo
$ docker plugin set myplugin mymount.source=/bar
$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin
/bar
```
> **Note**: Since only `source` is settable in `mymount`,
> `docker plugin set myplugin mymount=/bar` would work too.
### Change a device path
The following example changes the path of the `mydevice` device on
the `myplugin` plugin.
```bash
$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin
/dev/foo
$ docker plugin set myplugin mydevice.path=/dev/bar
$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin
/dev/bar
```
> **Note**: Since only `path` is settable in `mydevice`,
> `docker plugin set myplugin mydevice=/dev/bar` would work too.
### Change the value of the arguments
The following example changes the value of the args on the `myplugin` plugin.
```bash
$ docker plugin inspect -f '{{.Settings.Args}}' myplugin
["foo", "bar"]
$ docker plugin set myplugin myargs="foo bar baz"
$ docker plugin inspect -f '{{.Settings.Args}}' myplugin
["foo", "bar", "baz"]
```
## Related commands
* [plugin create](plugin_create.md)
* [plugin disable](plugin_disable.md)
* [plugin enable](plugin_enable.md)
* [plugin inspect](plugin_inspect.md)
* [plugin install](plugin_install.md)
* [plugin ls](plugin_ls.md)
* [plugin push](plugin_push.md)
* [plugin rm](plugin_rm.md)
* [plugin upgrade](plugin_upgrade.md)

View File

@ -0,0 +1,100 @@
---
title: "plugin upgrade"
description: "the plugin upgrade command description and usage"
keywords: "plugin, upgrade"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# plugin upgrade
```markdown
Usage: docker plugin upgrade [OPTIONS] PLUGIN [REMOTE]
Upgrade a plugin
Options:
--disable-content-trust Skip image verification (default true)
--grant-all-permissions Grant all permissions necessary to run the plugin
--help Print usage
--skip-remote-check Do not check if specified remote plugin matches existing plugin image
```
## Description
Upgrades an existing plugin to the specified remote plugin image. If no remote
is specified, Docker will re-pull the current image and use the updated version.
All existing references to the plugin will continue to work.
The plugin must be disabled before running the upgrade.
## Examples
The following example installs the `vieux/sshfs` plugin, uses it to create and use
a volume, then upgrades the plugin.
```bash
$ docker plugin install vieux/sshfs:next DEBUG=1
Plugin "vieux/sshfs:next" is requesting the following privileges:
- network: [host]
- device: [/dev/fuse]
- capabilities: [CAP_SYS_ADMIN]
Do you grant the above permissions? [y/N] y
vieux/sshfs:next
$ docker volume create -d vieux/sshfs:next -o sshcmd=root@1.2.3.4:/tmp/shared -o password=XXX sshvolume
sshvolume
$ docker run -it -v sshvolume:/data alpine sh -c "touch /data/hello"
$ docker plugin disable -f vieux/sshfs:next
vieux/sshfs:next
# Here docker volume ls doesn't show 'sshvolume', since the plugin is disabled
$ docker volume ls
DRIVER VOLUME NAME
$ docker plugin upgrade vieux/sshfs:next vieux/sshfs:next
Plugin "vieux/sshfs:next" is requesting the following privileges:
- network: [host]
- device: [/dev/fuse]
- capabilities: [CAP_SYS_ADMIN]
Do you grant the above permissions? [y/N] y
Upgrade plugin vieux/sshfs:next to vieux/sshfs:next
$ docker plugin enable vieux/sshfs:next
vieux/sshfs:next
$ docker volume ls
DRIVER VOLUME NAME
vieux/sshfs:next      sshvolume
$ docker run -it -v sshvolume:/data alpine sh -c "ls /data"
hello
```
## Related commands
* [plugin create](plugin_create.md)
* [plugin disable](plugin_disable.md)
* [plugin enable](plugin_enable.md)
* [plugin inspect](plugin_inspect.md)
* [plugin install](plugin_install.md)
* [plugin ls](plugin_ls.md)
* [plugin push](plugin_push.md)
* [plugin rm](plugin_rm.md)
* [plugin set](plugin_set.md)

View File

@ -0,0 +1,47 @@
---
title: "port"
description: "The port command description and usage"
keywords: "port, mapping, container"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# port
```markdown
Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]]
List port mappings or a specific mapping for the container
Options:
--help Print usage
```
## Examples
### Show all mapped ports
You can list all of a container's port mappings by not specifying a `PRIVATE_PORT`, or
show just a specific mapping:
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test
$ docker port test
7890/tcp -> 0.0.0.0:4321
9876/tcp -> 0.0.0.0:1234
$ docker port test 7890/tcp
0.0.0.0:4321
$ docker port test 7890/udp
2014/06/24 11:53:36 Error: No public port '7890/udp' published for test
$ docker port test 7890
0.0.0.0:4321
```

View File

@ -0,0 +1,432 @@
---
title: "ps"
description: "The ps command description and usage"
keywords: "container, running, list"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# ps
```markdown
Usage: docker ps [OPTIONS]
List containers
Options:
-a, --all Show all containers (default shows just running)
-f, --filter value Filter output based on conditions provided (default [])
- ancestor=(<image-name>[:tag]|<image-id>|<image@digest>)
containers created from an image or a descendant.
- before=(<container-name>|<container-id>)
- expose=(<port>[/<proto>]|<startport-endport>/[<proto>])
- exited=<int> an exit code of <int>
- health=(starting|healthy|unhealthy|none)
- id=<ID> a container's ID
- isolation=(`default`|`process`|`hyperv`) (Windows daemon only)
- is-task=(true|false)
- label=<key> or label=<key>=<value>
- name=<string> a container's name
- network=(<network-id>|<network-name>)
- publish=(<port>[/<proto>]|<startport-endport>/[<proto>])
- since=(<container-name>|<container-id>)
- status=(created|restarting|removing|running|paused|exited)
- volume=(<volume name>|<mount point destination>)
--format string Pretty-print containers using a Go template
--help Print usage
-n, --last int Show n last created containers (includes all states) (default -1)
-l, --latest Show the latest created container (includes all states)
--no-trunc Don't truncate output
-q, --quiet Only display numeric IDs
-s, --size Display total file sizes
```
## Examples
### Prevent truncating output
By default, `docker ps` truncates long values such as the container command. Run
`docker ps --no-trunc` to show the full output. The following example shows two linked containers:
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds 3300-3310/tcp webapp
d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db
```
### Show both running and stopped containers
The `docker ps` command only shows running containers by default. To see all
containers, use the `-a` (or `--all`) flag:
```bash
$ docker ps -a
```
`docker ps` groups exposed ports into a single range if possible. E.g., a
container that exposes TCP ports `100, 101, 102` displays `100-102/tcp` in
the `PORTS` column.
### Filtering
The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
than one filter, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
The currently supported filters are:
* id (container's id)
* label (`label=<key>` or `label=<key>=<value>`)
* name (container's name)
* exited (int - the code of exited containers. Only useful with `--all`)
* status (`created|restarting|running|removing|paused|exited|dead`)
* ancestor (`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - filters containers that were created from the given image or a descendant.
* before (container's id or name) - filters containers created before given id or name
* since (container's id or name) - filters containers created since given id or name
* isolation (`default|process|hyperv`) (Windows daemon only)
* volume (volume name or mount point) - filters containers that mount volumes.
* network (network id or name) - filters containers connected to the provided network
* health (starting|healthy|unhealthy|none) - filters containers based on healthcheck status
* publish=(container's published port) - filters containers by published port
* expose=(container's exposed port) - filters containers by exposed port
#### label
The `label` filter matches containers based on the presence of a `label` alone or a `label` and a
value.
The following filter matches containers with the `color` label regardless of its value.
```bash
$ docker ps --filter "label=color"
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
673394ef1d4c busybox "top" 47 seconds ago Up 45 seconds nostalgic_shockley
d85756f57265 busybox "top" 52 seconds ago Up 51 seconds high_albattani
```
The following filter matches containers with the `color` label with the `blue` value.
```bash
$ docker ps --filter "label=color=blue"
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
d85756f57265 busybox "top" About a minute ago Up About a minute high_albattani
```
#### name
The `name` filter matches on all or part of a container's name.
The following filter matches all containers with a name containing the `nostalgic_stallman` string.
```bash
$ docker ps --filter "name=nostalgic_stallman"
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
9b6247364a03 busybox "top" 2 minutes ago Up 2 minutes nostalgic_stallman
```
You can also filter for a substring in a name as this shows:
```bash
$ docker ps --filter "name=nostalgic"
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
715ebfcee040 busybox "top" 3 seconds ago Up 1 second i_am_nostalgic
9b6247364a03 busybox "top" 7 minutes ago Up 7 minutes nostalgic_stallman
673394ef1d4c busybox "top" 38 minutes ago Up 38 minutes nostalgic_shockley
```
#### exited
The `exited` filter matches containers by exit status code. For example, to
filter for containers that have exited successfully:
```bash
$ docker ps -a --filter 'exited=0'
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
ea09c3c82f6e registry:latest /srv/run.sh 2 weeks ago Exited (0) 2 weeks ago 127.0.0.1:5000->5000/tcp desperate_leakey
106ea823fe4e fedora:latest /bin/sh -c 'bash -l' 2 weeks ago Exited (0) 2 weeks ago determined_albattani
48ee228c9464 fedora:20 bash 2 weeks ago Exited (0) 2 weeks ago tender_torvalds
```
#### Filter by exit signal
You can use a filter to locate containers that exited with a status of `137`,
meaning a `SIGKILL(9)` killed them.
```none
$ docker ps -a --filter 'exited=137'
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
b3e1c0ed5bfe ubuntu:latest "sleep 1000" 12 seconds ago Exited (137) 5 seconds ago grave_kowalevski
a2eb5558d669 redis:latest "/entrypoint.sh redi 2 hours ago Exited (137) 2 hours ago sharp_lalande
```
Any of these events result in a `137` status:
* the `init` process of the container is killed manually
* `docker kill` kills the container
* the Docker daemon restarts, which kills all running containers
#### status
The `status` filter matches containers by status. You can filter using
`created`, `restarting`, `running`, `removing`, `paused`, `exited` and `dead`. For example,
to filter for `running` containers:
```bash
$ docker ps --filter status=running
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
715ebfcee040 busybox "top" 16 minutes ago Up 16 minutes i_am_nostalgic
d5c976d3c462 busybox "top" 23 minutes ago Up 23 minutes top
9b6247364a03 busybox "top" 24 minutes ago Up 24 minutes nostalgic_stallman
```
To filter for `paused` containers:
```bash
$ docker ps --filter status=paused
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
673394ef1d4c busybox "top" About an hour ago Up About an hour (Paused) nostalgic_shockley
```
#### ancestor
The `ancestor` filter matches containers based on their image or a descendant of
it. The filter supports the following image representations:
- image
- image:tag
- image:tag@digest
- short-id
- full-id
If you don't specify a `tag`, the `latest` tag is used. For example, to filter
for containers that use the latest `ubuntu` image:
```bash
$ docker ps --filter ancestor=ubuntu
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace
5d1e4a540723 ubuntu-c2 "top" About a minute ago Up About a minute admiring_sammet
82a598284012 ubuntu "top" 3 minutes ago Up 3 minutes sleepy_bose
bab2a34ba363 ubuntu "top" 3 minutes ago Up 3 minutes focused_yonath
```
Match containers based on the `ubuntu-c1` image which, in this case, is a child
of `ubuntu`:
```bash
$ docker ps --filter ancestor=ubuntu-c1
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace
```
Match containers based on the `ubuntu` version `12.04.5` image:
```bash
$ docker ps --filter ancestor=ubuntu:12.04.5
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose
```
The following matches containers based on the layer `d0e008c6cf02` or an image
that has this layer in its layer stack.
```bash
$ docker ps --filter ancestor=d0e008c6cf02
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose
```
#### Create time
##### before
The `before` filter shows only containers created before the container with the
given id or name. For example, given these containers:
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
9c3527ed70ce busybox "top" 14 seconds ago Up 15 seconds desperate_dubinsky
4aace5031105 busybox "top" 48 seconds ago Up 49 seconds focused_hamilton
6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat
```
Filtering with `before` would give:
```bash
$ docker ps -f before=9c3527ed70ce
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4aace5031105 busybox "top" About a minute ago Up About a minute focused_hamilton
6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat
```
##### since
The `since` filter shows only containers created since the container with the given
id or name. For example, using the same containers as in the `before` filter:
```bash
$ docker ps -f since=6e63f6ff38b0
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
9c3527ed70ce busybox "top" 10 minutes ago Up 10 minutes desperate_dubinsky
4aace5031105 busybox "top" 10 minutes ago Up 10 minutes focused_hamilton
```
#### volume
The `volume` filter shows only containers that mount a specific volume or have
a volume mounted in a specific path:
```bash
$ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}"
CONTAINER ID MOUNTS
9c3527ed70ce remote-volume
$ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}"
CONTAINER ID MOUNTS
9c3527ed70ce remote-volume
```
#### network
The `network` filter shows only containers that are connected to a network with
a given name or id.
The following filter matches all containers that are connected to a network
with a name containing `net1`.
```bash
$ docker run -d --net=net1 --name=test1 ubuntu top
$ docker run -d --net=net2 --name=test2 ubuntu top
$ docker ps --filter network=net1
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1
```
The network filter matches on both the network's name and id. The following
example shows all containers that are attached to the `net1` network, using
the network id as a filter:
```bash
$ docker network inspect --format "{{.ID}}" net1
8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5
$ docker ps --filter network=8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1
```
#### publish and expose
The `publish` and `expose` filters show only containers that have a published or exposed port with a given port
number, port range, and/or protocol. The default protocol is `tcp` when not specified.
The following filter matches all containers that have a published port of 80:
```bash
$ docker run -d --publish=80 busybox top
$ docker run -d --expose=8080 busybox top
$ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
9833437217a5 busybox "top" 5 seconds ago Up 4 seconds 8080/tcp dreamy_mccarthy
fc7e477723b7 busybox "top" 50 seconds ago Up 50 seconds 0.0.0.0:32768->80/tcp admiring_roentgen
$ docker ps --filter publish=80
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
fc7e477723b7 busybox "top" About a minute ago Up About a minute 0.0.0.0:32768->80/tcp admiring_roentgen
```
The following filter matches all containers that have an exposed TCP port in the range `8000-8080`:
```bash
$ docker ps --filter expose=8000-8080/tcp
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
9833437217a5 busybox "top" 21 seconds ago Up 19 seconds 8080/tcp dreamy_mccarthy
```
The following filter matches all containers that have a published UDP port `80`:
```bash
$ docker ps --filter publish=80/udp
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
```
### Formatting
The formatting option (`--format`) pretty-prints container output using a Go
template.
Valid placeholders for the Go template are listed below:
Placeholder | Description
--------------|----------------------------------------------------------------------------------------------------
`.ID` | Container ID
`.Image` | Image ID
`.Command` | Quoted command
`.CreatedAt` | Time when the container was created.
`.RunningFor` | Elapsed time since the container was started.
`.Ports` | Exposed ports.
`.Status` | Container status.
`.Size` | Container disk size.
`.Names` | Container names.
`.Labels` | All labels assigned to the container.
`.Label` | Value of a specific label for this container. For example `'{{.Label "com.docker.swarm.cpu"}}'`
`.Mounts` | Names of the volumes mounted in this container.
`.Networks` | Names of the networks attached to this container.
When using the `--format` option, the `ps` command either outputs the data
exactly as the template declares or, when using the `table` directive, includes
column headers as well.
The following example uses a template without headers and outputs the `ID` and
`Command` entries separated by a colon for all running containers:
```bash
$ docker ps --format "{{.ID}}: {{.Command}}"
a87ecb4f327c: /bin/sh -c #(nop) MA
01946d9d34d8: /bin/sh -c #(nop) MA
c1d3b0166030: /bin/sh -c yum -y up
41d50ecd2f57: /bin/sh -c #(nop) MA
```
To list all running containers with their labels in a table format you can use:
```bash
$ docker ps --format "table {{.ID}}\t{{.Labels}}"
CONTAINER ID LABELS
a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd
01946d9d34d8
c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6
41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd
```

View File

@ -0,0 +1,254 @@
---
title: "pull"
description: "The pull command description and usage"
keywords: "pull, image, hub, docker"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# pull
```markdown
Usage: docker pull [OPTIONS] NAME[:TAG|@DIGEST]
Pull an image or a repository from a registry
Options:
-a, --all-tags Download all tagged images in the repository
--disable-content-trust Skip image verification (default true)
--help Print usage
```
## Description
Most of your images will be created on top of a base image from the
[Docker Hub](https://hub.docker.com) registry.
[Docker Hub](https://hub.docker.com) contains many pre-built images that you
can `pull` and try without needing to define and configure your own.
To download a particular image, or set of images (i.e., a repository),
use `docker pull`.
### Proxy configuration
If you are behind an HTTP proxy server, for example in corporate settings, you
may need to configure the Docker daemon's proxy settings before it can connect
to a registry, using the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY`
environment variables. To set these environment variables on a host using
`systemd`, refer to [control and configure Docker with systemd](https://docs.docker.com/engine/admin/systemd/#http-proxy)
for information on how to configure them.
### Concurrent downloads
By default, the Docker daemon pulls three layers of an image at a time.
If you are on a low-bandwidth connection this may cause timeout issues, and you may want to lower
the limit via the `--max-concurrent-downloads` daemon option. See the
[daemon documentation](dockerd.md) for more details.
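For example, a minimal sketch of lowering the limit when starting the daemon manually (the same setting can also be made persistent in the daemon configuration file):
```bash
# Pull only one layer at a time to avoid timeouts on slow links
$ dockerd --max-concurrent-downloads 1
```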
## Examples
### Pull an image from Docker Hub
To download a particular image, or set of images (i.e., a repository), use
`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a
default. This command pulls the `debian:latest` image:
```bash
$ docker pull debian
Using default tag: latest
latest: Pulling from library/debian
fdd5d7827f33: Pull complete
a3ed95caeb02: Pull complete
Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa
Status: Downloaded newer image for debian:latest
```
Docker images can consist of multiple layers. In the example above, the image
consists of two layers: `fdd5d7827f33` and `a3ed95caeb02`.
Layers can be reused by images. For example, the `debian:jessie` image shares
both layers with `debian:latest`. Pulling the `debian:jessie` image therefore
only pulls its metadata, but not its layers, because all layers are already
present locally:
```bash
$ docker pull debian:jessie
jessie: Pulling from library/debian
fdd5d7827f33: Already exists
a3ed95caeb02: Already exists
Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e
Status: Downloaded newer image for debian:jessie
```
To see which images are present locally, use the [`docker images`](images.md)
command:
```bash
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
debian jessie f50f9524513f 5 days ago 125.1 MB
debian latest f50f9524513f 5 days ago 125.1 MB
```
Docker uses a content-addressable image store, and the image ID is a SHA256
digest covering the image's configuration and layers. In the example above,
`debian:jessie` and `debian:latest` have the same image ID because they are
actually the *same* image tagged with different names. Because they are the
same image, their layers are stored only once and do not consume extra disk
space.
For more information about images, layers, and the content-addressable store,
refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/).
### Pull an image by digest (immutable identifier)
So far, you've pulled images by their name (and "tag"). Using names and tags is
a convenient way to work with images. When using tags, you can `docker pull` an
image again to make sure you have the most up-to-date version of that image.
For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu
14.04 image.
In some cases you don't want images to be updated to newer versions, but prefer
to use a fixed version of an image. Docker enables you to pull an image by its
*digest*. When pulling an image by digest, you specify *exactly* which version
of an image to pull. Doing so allows you to "pin" an image to that version,
and guarantee that the image you're using is always the same.
To know the digest of an image, pull the image first. Let's pull the latest
`ubuntu:14.04` image from Docker Hub:
```bash
$ docker pull ubuntu:14.04
14.04: Pulling from library/ubuntu
5a132a7e7af1: Pull complete
fd2731e4c50c: Pull complete
28a2f68d1120: Pull complete
a3ed95caeb02: Pull complete
Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
Status: Downloaded newer image for ubuntu:14.04
```
Docker prints the digest of the image after the pull has finished. In the example
above, the digest of the image is:
`sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2`
Docker also prints the digest of an image when *pushing* to a registry. This
may be useful if you want to pin to a version of the image you just pushed.
A digest takes the place of the tag when pulling an image. For example, to
pull the above image by digest, run the following command:
```bash
$ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu
5a132a7e7af1: Already exists
fd2731e4c50c: Already exists
28a2f68d1120: Already exists
a3ed95caeb02: Already exists
Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
```
A digest can also be used in the `FROM` instruction of a Dockerfile. For example:
```Dockerfile
FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
MAINTAINER some maintainer <maintainer@example.com>
```
> **Note**: Using this feature "pins" an image to a specific version in time.
> Docker will therefore not pull updated versions of an image, which may include
> security updates. If you want to pull an updated image, you need to change the
> digest accordingly.
### Pull from a different registry
By default, `docker pull` pulls images from [Docker Hub](https://hub.docker.com). It is also possible to
manually specify the path of a registry to pull from. For example, if you have
set up a local registry, you can specify its path to pull from it. A registry
path is similar to a URL, but does not contain a protocol specifier (`https://`).
The following command pulls the `testing/test-image` image from a local registry
listening on port 5000 (`myregistry.local:5000`):
```bash
$ docker pull myregistry.local:5000/testing/test-image
```
Registry credentials are managed by [docker login](login.md).
Docker uses the `https://` protocol to communicate with a registry, unless the
registry is allowed to be accessed over an insecure connection. Refer to the
[insecure registries](dockerd.md#insecure-registries) section for more information.
### Pull a repository with multiple images
By default, `docker pull` pulls a *single* image from the registry. A repository
can contain multiple images. To pull all images from a repository, provide the
`-a` (or `--all-tags`) option when using `docker pull`.
This command pulls all images from the `fedora` repository:
```bash
$ docker pull --all-tags fedora
Pulling repository fedora
ad57ef8d78d7: Download complete
105182bb5e8b: Download complete
511136ea3c5a: Download complete
73bd853d2ea5: Download complete
....
Status: Downloaded newer image for fedora
```
After the pull has completed, use the `docker images` command to see the
images that were pulled. The example below shows all the `fedora` images
that are present locally:
```bash
$ docker images fedora
REPOSITORY TAG IMAGE ID CREATED SIZE
fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB
fedora 20 105182bb5e8b 5 days ago 372.7 MB
fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB
fedora latest 105182bb5e8b 5 days ago 372.7 MB
```
### Cancel a pull
Killing the `docker pull` process, for example by pressing `CTRL-c` while it is
running in a terminal, will terminate the pull operation.
```bash
$ docker pull fedora
Using default tag: latest
latest: Pulling from library/fedora
a3ed95caeb02: Pulling fs layer
236608c7b546: Pulling fs layer
^C
```
> **Note**: Technically, the Engine terminates a pull operation when the
> connection between the Docker Engine daemon and the Docker Engine client
> initiating the pull is lost. If the connection with the Engine daemon is
> lost for reasons other than a manual interruption, the pull is also aborted.

View File

@ -0,0 +1,82 @@
---
title: "push"
description: "The push command description and usage"
keywords: "share, push, image"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# push
```markdown
Usage: docker push [OPTIONS] NAME[:TAG]
Push an image or a repository to a registry
Options:
--disable-content-trust Skip image signing (default true)
--help Print usage
```
## Description
Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com)
registry or to a self-hosted one.
Refer to the [`docker tag`](tag.md) reference for more information about valid
image and tag names.
Killing the `docker push` process, for example by pressing `CTRL-c` while it is
running in a terminal, terminates the push operation.
Progress bars are shown during `docker push`, and they show the uncompressed size. The
data is compressed before it is sent, so the uploaded size is not reflected
by the progress bar.
Registry credentials are managed by [docker login](login.md).
### Concurrent uploads
By default the Docker daemon will push five layers of an image at a time.
If you are on a low bandwidth connection this may cause timeout issues and you may want to lower
this via the `--max-concurrent-uploads` daemon option. See the
[daemon documentation](dockerd.md) for more details.
## Examples
### Push a new image to a registry
First save the new image by finding the container ID (using [`docker ps`](ps.md))
and then committing it to a new image name. Note that only `a-z0-9-_.` are
allowed when naming images:
```bash
$ docker commit c16378f943fe rhel-httpd
```
Now, push the image to the registry using the image ID. In this example the
registry is on host named `registry-host` and listening on port `5000`. To do
this, tag the image with the host name or IP address, and the port of the
registry:
```bash
$ docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd
$ docker push registry-host:5000/myadmin/rhel-httpd
```
Check that this worked by running:
```bash
$ docker images
```
You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd`
listed.
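Because `docker images` accepts a repository name as an argument, you can also narrow the listing to the repository you just pushed; a minimal sketch:
```bash
# List only the images for the repository that was just pushed
$ docker images registry-host:5000/myadmin/rhel-httpd
```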

View File

@ -0,0 +1,35 @@
---
title: "rename"
description: "The rename command description and usage"
keywords: "rename, docker, container"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# rename
```markdown
Usage: docker rename CONTAINER NEW_NAME
Rename a container
Options:
--help Print usage
```
## Description
The `docker rename` command renames a container.
## Examples
```bash
$ docker rename my_container my_new_container
```

View File

@ -0,0 +1,32 @@
---
title: "restart"
description: "The restart command description and usage"
keywords: "restart, container, Docker"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# restart
```markdown
Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...]
Restart one or more containers
Options:
--help Print usage
-t, --time int Seconds to wait for stop before killing the container (default 10)
```
## Examples
```bash
$ docker restart my_container
```

View File

@ -0,0 +1,100 @@
---
title: "rm"
description: "The rm command description and usage"
keywords: "remove, Docker, container"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# rm
```markdown
Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...]
Remove one or more containers
Options:
-f, --force Force the removal of a running container (uses SIGKILL)
--help Print usage
-l, --link Remove the specified link
-v, --volumes Remove the volumes associated with the container
```
## Examples
### Remove a container
This will remove the container referenced under the link
`/redis`.
```bash
$ docker rm /redis
/redis
```
### Remove a link specified with `--link` on the default bridge network
This will remove the underlying link between `/webapp` and the `/redis`
containers on the default bridge network, removing all network communication
between the two containers. This does not apply when `--link` is used with
user-specified networks.
```bash
$ docker rm --link /webapp/redis
/webapp/redis
```
### Force-remove a running container
This command will force-remove a running container.
```bash
$ docker rm --force redis
redis
```
The main process inside the container referenced under the link `redis` receives
`SIGKILL`, and then the container is removed.
### Remove all stopped containers
```bash
$ docker rm $(docker ps -a -q)
```
This command will delete all stopped containers. The command
`docker ps -a -q` will return all existing container IDs and pass them to
the `rm` command which will delete them. Any running containers will not be
deleted.
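As a sketch, the same command substitution can be combined with a [`docker ps`](ps.md) filter to limit the cleanup, for example to containers that have exited:
```bash
# Remove only containers whose status is "exited"
$ docker rm $(docker ps -a -q --filter status=exited)
```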
### Remove a container and its volumes
```bash
$ docker rm -v redis
redis
```
This command will remove the container and any volumes associated with it.
Note that if a volume was specified with a name, it will not be removed.
### Remove a container and selectively remove volumes
```bash
$ docker create -v awesome:/foo -v /bar --name hello redis
hello
$ docker rm -v hello
```
In this example, the volume for `/foo` will remain intact, but the volume for
`/bar` will be removed. The same behavior holds for volumes inherited with
`--volumes-from`.

View File

@ -0,0 +1,105 @@
---
title: "rmi"
description: "The rmi command description and usage"
keywords: "remove, image, Docker"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# rmi
```markdown
Usage: docker rmi [OPTIONS] IMAGE [IMAGE...]
Remove one or more images
Options:
-f, --force Force removal of the image
--help Print usage
--no-prune Do not delete untagged parents
```
## Examples
You can remove an image using its short or long ID, its tag, or its digest. If
an image has one or more tags referencing it, you must remove all of them before
the image is removed. Digest references are removed automatically when an image
is removed by tag.
```bash
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
$ docker rmi fd484f19954f
Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force
2013/12/11 05:47:16 Error: failed to remove one or more images
$ docker rmi test1
Untagged: test1:latest
$ docker rmi test2
Untagged: test2:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
$ docker rmi test
Untagged: test:latest
Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
```
If you use the `-f` flag and specify the image's short or long ID, then this
command untags and removes all images that match the specified ID.
```bash
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
$ docker rmi -f fd484f19954f
Untagged: test1:latest
Untagged: test:latest
Untagged: test2:latest
Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
```
An image pulled by digest has no tag associated with it:
```bash
$ docker images --digests
REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE
localhost:5000/test/busybox <none> sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB
```
To remove an image using its digest:
```bash
$ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
Deleted: 4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125
Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2
Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b
```
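For illustration only (this example is not in the original text), the `--no-prune` flag
listed above keeps untagged parent layers in place when an image is removed; `test` is
the image tag used in the earlier examples:
```bash
$ docker rmi --no-prune test
```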

@ -0,0 +1,777 @@
---
title: "run"
description: "The run command description and usage"
keywords: "run, command, container"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# run
```markdown
Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
Run a command in a new container
Options:
--add-host value Add a custom host-to-IP mapping (host:ip) (default [])
-a, --attach value Attach to STDIN, STDOUT or STDERR (default [])
--blkio-weight value Block IO (relative weight), between 10 and 1000
--blkio-weight-device value Block IO weight (relative device weight) (default [])
--cap-add value Add Linux capabilities (default [])
--cap-drop value Drop Linux capabilities (default [])
--cgroup-parent string Optional parent cgroup for the container
--cidfile string Write the container ID to the file
--cpu-count int The number of CPUs available for execution by the container.
Windows daemon only. On Windows Server containers, this is
approximated as a percentage of total CPU usage.
--cpu-percent int Limit percentage of CPU available for execution
by the container. Windows daemon only.
The processor resource controls are mutually
exclusive, the order of precedence is CPUCount
first, then CPUShares, and CPUPercent last.
--cpu-period int Limit CPU CFS (Completely Fair Scheduler) period
--cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota
-c, --cpu-shares int CPU shares (relative weight)
--cpus NanoCPUs Number of CPUs (default 0.000)
--cpu-rt-period int Limit the CPU real-time period in microseconds
--cpu-rt-runtime int Limit the CPU real-time runtime in microseconds
--cpuset-cpus string CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems string MEMs in which to allow execution (0-3, 0,1)
-d, --detach Run container in background and print container ID
--detach-keys string Override the key sequence for detaching a container
--device value Add a host device to the container (default [])
--device-cgroup-rule value Add a rule to the cgroup allowed devices list
--device-read-bps value Limit read rate (bytes per second) from a device (default [])
--device-read-iops value Limit read rate (IO per second) from a device (default [])
--device-write-bps value Limit write rate (bytes per second) to a device (default [])
--device-write-iops value Limit write rate (IO per second) to a device (default [])
--disable-content-trust Skip image verification (default true)
--dns value Set custom DNS servers (default [])
--dns-option value Set DNS options (default [])
--dns-search value Set custom DNS search domains (default [])
--entrypoint string Overwrite the default ENTRYPOINT of the image
-e, --env value Set environment variables (default [])
--env-file value Read in a file of environment variables (default [])
--expose value Expose a port or a range of ports (default [])
--group-add value Add additional groups to join (default [])
--health-cmd string Command to run to check health
--health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s)
--health-retries int Consecutive failures needed to report unhealthy
--health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s)
--health-start-period duration Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h) (default 0s)
--help Print usage
-h, --hostname string Container host name
--init Run an init inside the container that forwards signals and reaps processes
-i, --interactive Keep STDIN open even if not attached
--io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only)
(Windows only). The format is `<number><unit>`.
Unit is optional and can be `b` (bytes per second),
`k` (kilobytes per second), `m` (megabytes per second),
or `g` (gigabytes per second). If you omit the unit,
the system uses bytes per second.
--io-maxbandwidth and --io-maxiops are mutually exclusive options.
--io-maxiops uint Maximum IOps limit for the system drive (Windows only)
--ip string IPv4 address (e.g., 172.30.100.104)
--ip6 string IPv6 address (e.g., 2001:db8::33)
--ipc string IPC namespace to use
--isolation string Container isolation technology
--kernel-memory string Kernel memory limit
-l, --label value Set meta data on a container (default [])
--label-file value Read in a line delimited file of labels (default [])
--link value Add link to another container (default [])
--link-local-ip value Container IPv4/IPv6 link-local addresses (default [])
--log-driver string Logging driver for the container
--log-opt value Log driver options (default [])
--mac-address string Container MAC address (e.g., 92:d0:c6:0a:29:33)
-m, --memory string Memory limit
--memory-reservation string Memory soft limit
--memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--memory-swappiness int Tune container memory swappiness (0 to 100) (default -1)
--mount value Attach a filesystem mount to the container (default [])
--name string Assign a name to the container
--network-alias value Add network-scoped alias for the container (default [])
--network string Connect a container to a network
'bridge': create a network stack on the default Docker bridge
'none': no networking
'container:<name|id>': reuse another container's network stack
'host': use the Docker host network stack
'<network-name>|<network-id>': connect to a user-defined network
--no-healthcheck Disable any container-specified HEALTHCHECK
--oom-kill-disable Disable OOM Killer
--oom-score-adj int Tune host's OOM preferences (-1000 to 1000)
--pid string PID namespace to use
--pids-limit int Tune container pids limit (set -1 for unlimited)
--privileged Give extended privileges to this container
-p, --publish value Publish a container's port(s) to the host (default [])
-P, --publish-all Publish all exposed ports to random ports
--read-only Mount the container's root filesystem as read only
--restart string Restart policy to apply when a container exits (default "no")
Possible values are : no, on-failure[:max-retry], always, unless-stopped
--rm Automatically remove the container when it exits
--runtime string Runtime to use for this container
--security-opt value Security Options (default [])
--shm-size bytes Size of /dev/shm
The format is `<number><unit>`. `number` must be greater than `0`.
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes),
or `g` (gigabytes). If you omit the unit, the system uses bytes.
--sig-proxy Proxy received signals to the process (default true)
--stop-signal string Signal to stop a container (default "SIGTERM")
--stop-timeout=10 Timeout (in seconds) to stop a container
--storage-opt value Storage driver options for the container (default [])
--sysctl value Sysctl options (default map[])
--tmpfs value Mount a tmpfs directory (default [])
-t, --tty Allocate a pseudo-TTY
--ulimit value Ulimit options (default [])
-u, --user string Username or UID (format: <name|uid>[:<group|gid>])
--userns string User namespace to use
'host': Use the Docker host user namespace
'': Use the Docker daemon user namespace specified by `--userns-remap` option.
--uts string UTS namespace to use
-v, --volume value Bind mount a volume (default []). The format
is `[host-src:]container-dest[:<options>]`.
The comma-delimited `options` are [rw|ro],
[z|Z], [[r]shared|[r]slave|[r]private],
[delegated|cached|consistent], and
[nocopy]. The 'host-src' is an absolute path
or a name value.
--volume-driver string Optional volume driver for the container
--volumes-from value Mount volumes from the specified container(s) (default [])
-w, --workdir string Working directory inside the container
```
## Description
The `docker run` command first `creates` a writeable container layer over the
specified image, and then `starts` it using the specified command. That is,
`docker run` is equivalent to the API `/containers/create` then
`/containers/(id)/start`. A stopped container can be restarted with all its
previous changes intact using `docker start`. See `docker ps -a` to view a list
of all containers.
The `docker run` command can be used in combination with `docker commit` to
[*change the command that a container runs*](commit.md). There is additional detailed information about `docker run` in the [Docker run reference](../run.md).
For information on connecting a container to a network, see the ["*Docker network overview*"](https://docs.docker.com/engine/userguide/networking/).
## Examples
### Assign name and allocate pseudo-TTY (--name, -it)
```bash
$ docker run --name test -it debian
root@d6c0fe130dba:/# exit 13
$ echo $?
13
$ docker ps -a | grep test
d6c0fe130dba debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test
```
This example runs a container named `test` using the `debian:latest`
image. The `-it` instructs Docker to allocate a pseudo-TTY connected to
the container's stdin, creating an interactive `bash` shell in the container.
In the example, the `bash` shell is quit by entering
`exit 13`. This exit code is passed on to the caller of
`docker run`, and is recorded in the `test` container's metadata.
### Capture container ID (--cidfile)
```bash
$ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
```
This will create a container and print `test` to the console. The `--cidfile`
flag makes Docker attempt to create a new file and write the container ID to it.
If the file exists already, Docker will return an error. Docker will close this
file when `docker run` exits.
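As a small illustrative addition (not in the original), the recorded ID can then be read
back from the file created above:
```bash
$ cat /tmp/docker_test.cid
```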
### Full container capabilities (--privileged)
```bash
$ docker run -t -i --rm ubuntu bash
root@bc338942ef20:/# mount -t tmpfs none /mnt
mount: permission denied
```
This will *not* work, because by default, most potentially dangerous kernel
capabilities are dropped, including `cap_sys_admin` (which is required to mount
filesystems). However, the `--privileged` flag will allow it to run:
```bash
$ docker run -t -i --privileged ubuntu bash
root@50e3f57e16e6:/# mount -t tmpfs none /mnt
root@50e3f57e16e6:/# df -h
Filesystem Size Used Avail Use% Mounted on
none 1.9G 0 1.9G 0% /mnt
```
The `--privileged` flag gives *all* capabilities to the container, and it also
lifts all the limitations enforced by the `device` cgroup controller. In other
words, the container can then do almost everything that the host can do. This
flag exists to allow special use-cases, like running Docker within Docker.
### Set working directory (-w)
```bash
$ docker run -w /path/to/dir/ -i -t ubuntu pwd
```
The `-w` option runs the command inside the given directory, in this case
`/path/to/dir/`. If the path does not exist, it is created inside the container.
### Set storage driver options per container
```bash
$ docker run -it --storage-opt size=120G fedora /bin/bash
```
This (size) allows you to set the container rootfs size to 120G at creation time.
This option is only available for the `devicemapper`, `btrfs`, `overlay2`,
`windowsfilter` and `zfs` graph drivers.
For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers,
you cannot pass a size less than the Default BaseFS Size.
For the `overlay2` storage driver, the size option is only available if the
backing fs is `xfs` and mounted with the `pquota` mount option.
Under these conditions, you can pass any size less than the backing fs size.
### Mount tmpfs (--tmpfs)
```bash
$ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image
```
The `--tmpfs` flag mounts an empty tmpfs into the container with the `rw`,
`noexec`, `nosuid`, `size=65536k` options.
### Mount volume (-v, --read-only)
```bash
$ docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd
```
The `-v` flag mounts the current working directory into the container. The `-w`
flag causes the command to be executed inside the current working directory, by
changing into the directory returned by `pwd`. So this combination executes the
command in the container, but inside the current working directory.
```bash
$ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash
```
When the host directory of a bind-mounted volume doesn't exist, Docker
will automatically create this directory on the host for you. In the
example above, Docker will create the `/doesnt/exist`
folder before starting your container.
```bash
$ docker run --read-only -v /icanwrite busybox touch /icanwrite/here
```
Volumes can be used in combination with `--read-only` to control where
a container writes files. The `--read-only` flag mounts the container's root
filesystem as read only, prohibiting writes to locations other than the
specified volumes for the container.
```bash
$ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh
```
By bind-mounting the docker unix socket and statically linked docker
binary (refer to [get the linux binary](
https://docs.docker.com/engine/installation/binaries/#/get-the-linux-binary)),
you give the container full access to create and manipulate the host's
Docker daemon.
On Windows, the paths must be specified using Windows-style semantics.
```powershell
PS C:\> docker run -v c:\foo:c:\dest microsoft/nanoserver cmd /s /c type c:\dest\somefile.txt
Contents of file
PS C:\> docker run -v c:\foo:d: microsoft/nanoserver cmd /s /c type d:\somefile.txt
Contents of file
```
The following examples will fail when using Windows-based containers, as the
destination of a volume or bind-mount inside the container must be one of:
a non-existing or empty directory; or a drive other than C:. Further, the source
of a bind mount must be a local directory, not a file.
```powershell
net use z: \\remotemachine\share
docker run -v z:\foo:c:\dest ...
docker run -v \\uncpath\to\directory:c:\dest ...
docker run -v c:\foo\somefile.txt:c:\dest ...
docker run -v c:\foo:c: ...
docker run -v c:\foo:c:\existing-directory-with-contents ...
```
For in-depth information about volumes, refer to [manage data in containers](https://docs.docker.com/engine/tutorials/dockervolumes/)
### Add bind-mounts or volumes using the --mount flag
The `--mount` flag allows you to mount volumes, host-directories and `tmpfs`
mounts in a container.
The `--mount` flag supports most options that are supported by the `-v` or the
`--volume` flag, but uses a different syntax. For in-depth information on the
`--mount` flag, and a comparison between `--volume` and `--mount`, refer to
the [service create command reference](service_create.md#add-bind-mounts-or-volumes).
Even though there is no plan to deprecate `--volume`, usage of `--mount` is recommended.
Examples:
```bash
$ docker run --read-only --mount type=volume,target=/icanwrite busybox touch /icanwrite/here
```
```bash
$ docker run -t -i --mount type=bind,src=/data,dst=/data busybox sh
```
### Publish or expose port (-p, --expose)
```bash
$ docker run -p 127.0.0.1:80:8080 ubuntu bash
```
This binds port `8080` of the container to port `80` on `127.0.0.1` of the host
machine. The [Docker User
Guide](https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/)
explains in detail how to manipulate ports in Docker.
```bash
$ docker run --expose 80 ubuntu bash
```
This exposes port `80` of the container without publishing the port to the host
system's interfaces.
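As a further illustrative sketch (not in the original; the `nginx` image is only an
assumed example), `-P`/`--publish-all` publishes every exposed port to a random port on
the host, and `docker port` shows the resulting mappings:
```bash
$ docker run -d -P nginx
$ docker port "$(docker ps -lq)"
```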
### Set environment variables (-e, --env, --env-file)
```bash
$ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash
```
Use the `-e`, `--env`, and `--env-file` flags to set simple (non-array)
environment variables in the container you're running, or overwrite variables
that are defined in the Dockerfile of the image you're running.
You can define the variable and its value when running the container:
```bash
$ docker run --env VAR1=value1 --env VAR2=value2 ubuntu env | grep VAR
VAR1=value1
VAR2=value2
```
You can also use variables that you've exported to your local environment:
```bash
export VAR1=value1
export VAR2=value2
$ docker run --env VAR1 --env VAR2 ubuntu env | grep VAR
VAR1=value1
VAR2=value2
```
When running the command, the Docker CLI client checks the value the variable
has in your local environment and passes it to the container.
If no `=` is provided and that variable is not exported in your local
environment, the variable won't be set in the container.
You can also load the environment variables from a file. This file should use
the syntax `<variable>=value` (which sets the variable to the given value) or
`<variable>` (which takes the value from the local environment), and `#` for comments.
```bash
$ cat env.list
# This is a comment
VAR1=value1
VAR2=value2
USER
$ docker run --env-file env.list ubuntu env | grep VAR
VAR1=value1
VAR2=value2
USER=denis
```
### Set metadata on container (-l, --label, --label-file)
A label is a `key=value` pair that applies metadata to a container. To label a container with two labels:
```bash
$ docker run -l my-label --label com.example.foo=bar ubuntu bash
```
The `my-label` key doesn't specify a value so the label defaults to an empty
string (`""`). To add multiple labels, repeat the label flag (`-l` or `--label`).
The `key=value` must be unique to avoid overwriting the label value. If you
specify labels with identical keys but different values, each subsequent value
overwrites the previous. Docker uses the last `key=value` you supply.
Use the `--label-file` flag to load multiple labels from a file. Delimit each
label in the file with an EOL mark. The example below loads labels from a
labels file in the current directory:
```bash
$ docker run --label-file ./labels ubuntu bash
```
The label-file format is similar to the format for loading environment
variables. (Unlike environment variables, labels are not visible to processes
running inside a container.) The following example illustrates a label-file
format:
```none
com.example.label1="a label"
# this is a comment
com.example.label2=another\ label
com.example.label3
```
You can load multiple label-files by supplying multiple `--label-file` flags.
For additional information on working with labels, see [*Labels - custom
metadata in Docker*](https://docs.docker.com/engine/userguide/labels-custom-metadata/) in the Docker User
Guide.
### Connect a container to a network (--network)
When you start a container, use the `--network` flag to connect it to a network.
This adds the `busybox` container to the `my-net` network.
```bash
$ docker run -itd --network=my-net busybox
```
You can also choose the IP addresses for the container with `--ip` and `--ip6`
flags when you start the container on a user-defined network.
```bash
$ docker run -itd --network=my-net --ip=10.10.9.75 busybox
```
If you want to add a running container to a network use the `docker network connect` subcommand.
You can connect multiple containers to the same network. Once connected, the
containers can communicate easily using only another container's IP address
or name. For `overlay` networks or custom plugins that support multi-host
connectivity, containers connected to the same multi-host network but launched
from different Engines can also communicate in this way.
> **Note**: Service discovery is unavailable on the default bridge network.
> Containers can communicate via their IP addresses by default. To communicate
> by name, they must be linked.
You can disconnect a container from a network using the `docker network
disconnect` command.
### Mount volumes from container (--volumes-from)
```bash
$ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd
```
The `--volumes-from` flag mounts all the defined volumes from the referenced
containers. Containers can be specified by repetitions of the `--volumes-from`
argument. The container ID may be optionally suffixed with `:ro` or `:rw` to
mount the volumes in read-only or read-write mode, respectively. By default,
the volumes are mounted in the same mode (read write or read only) as
the reference container.
Labeling systems like SELinux require that proper labels are placed on volume
content mounted into a container. Without a label, the security system might
prevent the processes running inside the container from using the content. By
default, Docker does not change the labels set by the OS.
To change the label in the container context, you can add either of two suffixes
`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file
objects on the shared volumes. The `z` option tells Docker that two containers
share the volume content. As a result, Docker labels the content with a shared
content label. Shared volume labels allow all containers to read/write content.
The `Z` option tells Docker to label the content with a private unshared label.
Only the current container can use a private volume.
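To make this concrete, here is an illustrative sketch (the host path `/var/db` and the
`fedora` image are assumed for the example) that relabels a bind-mounted directory with
the shared `z` option:
```bash
$ docker run -v /var/db:/var/db:z -i -t fedora bash
```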
### Attach to STDIN/STDOUT/STDERR (-a)
The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT`
or `STDERR`. This makes it possible to manipulate the output and input as
needed.
```bash
$ echo "test" | docker run -i -a stdin ubuntu cat -
```
This pipes data into a container and prints the container's ID by attaching
only to the container's `STDIN`.
```bash
$ docker run -a stderr ubuntu echo test
```
This isn't going to print anything unless there's an error because we've
only attached to the `STDERR` of the container. The container's logs
still store what's been written to `STDERR` and `STDOUT`.
```bash
$ cat somefile | docker run -i -a stdin mybuilder dobuild
```
This is how piping a file into a container could be done for a build.
The container's ID will be printed after the build is done and the build
logs could be retrieved using `docker logs`. This is
useful if you need to pipe a file or something else into a container and
retrieve the container's ID once the container has finished running.
### Add host device to container (--device)
```bash
$ docker run --device=/dev/sdc:/dev/xvdc \
--device=/dev/sdd --device=/dev/zero:/dev/nulo \
-i -t \
ubuntu ls -l /dev/{xvdc,sdd,nulo}
brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc
brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd
crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo
```
It is often necessary to directly expose devices to a container. The `--device`
option enables that. For example, a specific block storage device, loop
device, or audio device can be added to an otherwise unprivileged container
(without the `--privileged` flag), allowing the application to access it directly.
By default, the container will be able to `read`, `write` and `mknod` these devices.
This can be overridden using a third `:rwm` set of options to each `--device`
flag:
```bash
$ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc
Command (m for help): q
$ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc
You will not be able to write the partition table.
Command (m for help): q
$ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk /dev/xvdc
Command (m for help): q
$ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc
fdisk: unable to open /dev/xvdc: Operation not permitted
```
> **Note**: `--device` cannot be safely used with ephemeral devices. Block devices
> that may be removed should not be added to untrusted containers with
> `--device`.
### Restart policies (--restart)
Use Docker's `--restart` to specify a container's *restart policy*. A restart
policy controls whether the Docker daemon restarts a container after exit.
Docker supports the following restart policies:
| Policy | Result |
|:----------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `no` | Do not automatically restart the container when it exits. This is the default. |
| `on-failure` | Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts. |
| `always` | Always restart the container regardless of the exit status. When you specify always, the Docker daemon will try to restart the container indefinitely. The container will also always start on daemon startup, regardless of the current state of the container. |
```bash
$ docker run --restart=always redis
```
This will run the `redis` container with a restart policy of **always**
so that if the container exits, Docker will restart it.
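As a further illustration (added here, not in the original), the `on-failure` policy can
also cap the number of restart attempts:
```bash
$ docker run --restart=on-failure:10 redis
```
This runs the `redis` container with a restart policy of **on-failure** and a maximum
restart count of 10.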
More detailed information on restart policies can be found in the
[Restart Policies (--restart)](../run.md#restart-policies-restart)
section of the Docker run reference page.
### Add entries to container hosts file (--add-host)
You can add other hosts into a container's `/etc/hosts` file by using one or
more `--add-host` flags. This example adds a static address for a host named
`docker`:
```bash
$ docker run --add-host=docker:10.180.0.1 --rm -it debian
root@f38c87f2a42d:/# ping docker
PING docker (10.180.0.1): 48 data bytes
56 bytes from 10.180.0.1: icmp_seq=0 ttl=254 time=7.600 ms
56 bytes from 10.180.0.1: icmp_seq=1 ttl=254 time=30.705 ms
^C--- docker ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max/stddev = 7.600/19.152/30.705/11.553 ms
```
Sometimes you need to connect to the Docker host from within your
container. To enable this, pass the Docker host's IP address to
the container using the `--add-host` flag. To find the host's address,
use the `ip addr show` command.
The flags you pass to `ip addr show` depend on whether you are
using IPv4 or IPv6 networking in your containers. Use the following
flags for IPv4 address retrieval for a network device named `eth0`:
```bash
$ HOSTIP=`ip -4 addr show scope global dev eth0 | grep inet | awk '{print \$2}' | cut -d / -f 1`
$ docker run --add-host=docker:${HOSTIP} --rm -it debian
```
For IPv6 use the `-6` flag instead of the `-4` flag. For other network
devices, replace `eth0` with the correct device name (for example `docker0`
for the bridge device).
### Set ulimits in container (--ulimit)
Since setting `ulimit` settings in a container requires extra privileges not
available in the default container, you can set these using the `--ulimit` flag.
`--ulimit` is specified with a soft and hard limit as such:
`<type>=<soft limit>[:<hard limit>]`, for example:
```bash
$ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n"
1024
```
> **Note**: If you do not provide a `hard limit`, the `soft limit` will be used
> for both values. If no `ulimits` are set, they will be inherited from
> the default `ulimits` set on the daemon. The `as` option is disabled now.
> In other words, the following script is not supported:
>
> ```bash
> $ docker run -it --ulimit as=1024 fedora /bin/bash
> ```
The values are sent to the appropriate `syscall` as they are set.
Docker doesn't perform any byte conversion. Take this into account when setting the values.
#### For `nproc` usage
Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to set the
maximum number of processes available to a user, not to a container. For example, start four
containers as the `daemon` user:
```bash
$ docker run -d -u daemon --ulimit nproc=3 busybox top
$ docker run -d -u daemon --ulimit nproc=3 busybox top
$ docker run -d -u daemon --ulimit nproc=3 busybox top
$ docker run -d -u daemon --ulimit nproc=3 busybox top
```
The fourth container fails and reports a "[8] System error: resource temporarily unavailable" error.
It fails because the caller set `nproc=3`, and the first three containers use up
the three-process quota set for the `daemon` user.
### Stop container with signal (--stop-signal)
The `--stop-signal` flag sets the system call signal that will be sent to the container to exit.
This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9,
or a signal name in the format SIGNAME, for instance SIGKILL.
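A minimal illustrative sketch (the container name and image are chosen only for the
example): start a container with a custom stop signal, and `docker stop` then sends that
signal instead of the default `SIGTERM`:
```bash
$ docker run -d --name sigtest --stop-signal=SIGUSR1 busybox top
$ docker stop sigtest
```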
### Optional security options (--security-opt)
On Windows, this flag can be used to specify the `credentialspec` option.
The `credentialspec` must be in the format `file://spec.txt` or `registry://keyname`.
### Stop container with timeout (--stop-timeout)
The `--stop-timeout` flag sets the timeout (in seconds) to wait for the container to exit
after it receives the pre-defined (see `--stop-signal`) system call signal. After the timeout elapses, the container is killed with SIGKILL.
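As an added illustrative example (image chosen arbitrarily), a 30-second stop timeout
could be set like this:
```bash
$ docker run -d --stop-timeout 30 busybox top
```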
### Specify isolation technology for container (--isolation)
This option is useful in situations where you are running Docker containers on
Windows. The `--isolation <value>` option sets a container's isolation technology.
On Linux, the only supported value is `default`, which uses
Linux namespaces. These two commands are equivalent on Linux:
```bash
$ docker run -d busybox top
$ docker run -d --isolation default busybox top
```
On Windows, `--isolation` can take one of these values:
| Value | Description |
|:----------|:-------------------------------------------------------------------------------------------|
| `default` | Use the value specified by the Docker daemon's `--exec-opt` or system default (see below). |
| `process` | Shared-kernel namespace isolation (not supported on Windows client operating systems). |
| `hyperv` | Hyper-V hypervisor partition-based isolation. |
The default isolation on Windows server operating systems is `process`. The default (and only supported)
isolation on Windows client operating systems is `hyperv`. An attempt to start a container on a client
operating system with `--isolation process` will fail.
On Windows server, assuming the default configuration, these commands are equivalent
and result in `process` isolation:
```PowerShell
PS C:\> docker run -d microsoft/nanoserver powershell echo process
PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo process
PS C:\> docker run -d --isolation process microsoft/nanoserver powershell echo process
```
If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, or
are running against a Windows client-based daemon, these commands are equivalent and
result in `hyperv` isolation:
```PowerShell
PS C:\> docker run -d microsoft/nanoserver powershell echo hyperv
PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo hyperv
PS C:\> docker run -d --isolation hyperv microsoft/nanoserver powershell echo hyperv
```
### Configure namespaced kernel parameters (sysctls) at runtime
The `--sysctl` flag sets namespaced kernel parameters (sysctls) in the
container. For example, to turn on IP forwarding in the container's
network namespace, run this command:
```bash
$ docker run --sysctl net.ipv4.ip_forward=1 someimage
```
> **Note**: Not all sysctls are namespaced. Docker does not support changing sysctls
> inside of a container that also modify the host system. As the kernel
> evolves we expect to see more sysctls become namespaced.
#### Currently supported sysctls
- `IPC Namespace`:
```none
kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced
Sysctls beginning with fs.mqueue.*
```
If you use the `--ipc=host` option, these sysctls are not allowed.
- `Network Namespace`:
Sysctls beginning with net.*
If you use the `--network=host` option, using these sysctls is not allowed.
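As an additional illustrative sketch (the sysctl values are only examples), several
namespaced `net.*` sysctls can be combined by repeating the flag:
```bash
$ docker run --sysctl net.core.somaxconn=1024 --sysctl net.ipv4.ip_forward=1 someimage
```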

@ -0,0 +1,62 @@
---
title: "save"
description: "The save command description and usage"
keywords: "tarred, repository, backup"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# save
```markdown
Usage: docker save [OPTIONS] IMAGE [IMAGE...]
Save one or more images to a tar archive (streamed to STDOUT by default)
Options:
--help Print usage
-o, --output string Write to a file, instead of STDOUT
```
## Description
Produces a tarred repository to the standard output stream.
It contains all parent layers, and all tags and versions, or the specified `repo:tag`, for
each argument provided.
## Examples
### Create a backup that can then be used with `docker load`.
```bash
$ docker save busybox > busybox.tar
$ ls -sh busybox.tar
2.7M busybox.tar
$ docker save --output busybox.tar busybox
$ ls -sh busybox.tar
2.7M busybox.tar
$ docker save -o fedora-all.tar fedora
$ docker save -o fedora-latest.tar fedora:latest
```
### Cherry-pick particular tags
You can even cherry-pick particular tags of an image repository.
```bash
$ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy
```

@ -0,0 +1,149 @@
---
title: "search"
description: "The search command description and usage"
keywords: "search, hub, images"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# search
```markdown
Usage: docker search [OPTIONS] TERM
Search the Docker Hub for images
Options:
-f, --filter value Filter output based on conditions provided (default [])
- is-automated=(true|false)
- is-official=(true|false)
- stars=<number> - image has at least 'number' stars
--help Print usage
--limit int Max number of search results (default 25)
--no-trunc Don't truncate output
```
## Description
Search [Docker Hub](https://hub.docker.com) for images.
See [*Find Public Images on Docker Hub*](https://docs.docker.com/engine/tutorials/dockerrepos/#searching-for-images) for
more details on finding shared images from the command line.
> **Note**: Search queries return a maximum of 25 results.
## Examples
### Search images by name
This example displays images with a name containing 'busybox':
```none
$ docker search busybox
NAME DESCRIPTION STARS OFFICIAL AUTOMATED
busybox Busybox base image. 316 [OK]
progrium/busybox 50 [OK]
radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK]
odise/busybox-python 2 [OK]
azukiapp/busybox This image is meant to be used as the base... 2 [OK]
ofayau/busybox-jvm Prepare busybox to install a 32 bits JVM. 1 [OK]
shingonoide/archlinux-busybox Arch Linux, a lightweight and flexible Lin... 1 [OK]
odise/busybox-curl 1 [OK]
ofayau/busybox-libc32 Busybox with 32 bits (and 64 bits) libs 1 [OK]
peelsky/zulu-openjdk-busybox 1 [OK]
skomma/busybox-data Docker image suitable for data volume cont... 1 [OK]
elektritter/busybox-teamspeak Lightweight teamspeak3 container based on... 1 [OK]
socketplane/busybox 1 [OK]
oveits/docker-nginx-busybox This is a tiny NginX docker image based on... 0 [OK]
ggtools/busybox-ubuntu Busybox ubuntu version with extra goodies 0 [OK]
nikfoundas/busybox-confd Minimal busybox based distribution of confd 0 [OK]
openshift/busybox-http-app 0 [OK]
jllopis/busybox 0 [OK]
swyckoff/busybox 0 [OK]
powellquiring/busybox 0 [OK]
williamyeh/busybox-sh Docker image for BusyBox's sh 0 [OK]
simplexsys/busybox-cli-powered Docker busybox images, with a few often us... 0 [OK]
fhisamoto/busybox-java Busybox java 0 [OK]
scottabernethy/busybox 0 [OK]
marclop/busybox-solr
```
### Display non-truncated description (--no-trunc)
This example displays images with a name containing 'busybox', with at least
3 stars, and without truncating the description in the output:
```bash
$ docker search --filter=stars=3 --no-trunc busybox
NAME DESCRIPTION STARS OFFICIAL AUTOMATED
busybox Busybox base image. 325 [OK]
progrium/busybox 50 [OK]
radial/busyboxplus Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors. 8 [OK]
```
### Limit search results (--limit)
The `--limit` flag sets the maximum number of results returned by a search. This value
can range between 1 and 100. The default value of `--limit` is 25.
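For illustration (not part of the original examples), a search capped at five results
would look like this:
```bash
$ docker search --limit 5 busybox
```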
### Filtering
The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`).
The currently supported filters are:
* stars (int - number of stars the image has)
* is-automated (true|false) - is the image automated or not
* is-official (true|false) - is the image official or not
#### stars
This example displays images with a name containing 'busybox' and at
least 3 stars:
```bash
$ docker search --filter stars=3 busybox
NAME DESCRIPTION STARS OFFICIAL AUTOMATED
busybox Busybox base image. 325 [OK]
progrium/busybox 50 [OK]
radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK]
```
#### is-automated
This example displays images with a name containing 'busybox'
that are automated builds:
```bash
$ docker search --filter is-automated busybox
NAME DESCRIPTION STARS OFFICIAL AUTOMATED
progrium/busybox 50 [OK]
radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK]
```
#### is-official
This example displays images with a name containing 'busybox', with at least
3 stars, that are official builds:
```bash
$ docker search --filter "is-official=true" --filter "stars=3" busybox
NAME DESCRIPTION STARS OFFICIAL AUTOMATED
progrium/busybox 50 [OK]
radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK]
```

@ -0,0 +1,45 @@
---
title: "secret"
description: "The secret command description and usage"
keywords: "secret"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# secret
```markdown
Usage: docker secret COMMAND
Manage Docker secrets
Options:
--help Print usage
Commands:
create Create a secret from a file or STDIN as content
inspect Display detailed information on one or more secrets
ls List secrets
rm Remove one or more secrets
Run 'docker secret COMMAND --help' for more information on a command.
```
## Description
Manage secrets.
## Related commands
* [secret create](secret_create.md)
* [secret inspect](secret_inspect.md)
* [secret list](secret_list.md)
* [secret rm](secret_rm.md)

@ -0,0 +1,99 @@
---
title: "secret create"
description: "The secret create command description and usage"
keywords: ["secret, create"]
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# secret create
```Markdown
Usage: docker secret create [OPTIONS] SECRET file|-
Create a secret from a file or STDIN as content
Options:
--help Print usage
-l, --label list Secret labels (default [])
```
## Description
Creates a secret using standard input or from a file for the secret content. You must run this command on a manager node.
For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/).
## Examples
### Create a secret
```bash
$ echo <secret> | docker secret create my_secret -
onakdyv307se2tl7nl20anokv
$ docker secret ls
ID NAME CREATED UPDATED
onakdyv307se2tl7nl20anokv my_secret 6 seconds ago 6 seconds ago
```
### Create a secret with a file
```bash
$ docker secret create my_secret ./secret.json
dg426haahpi5ezmkkj5kyl3sn
$ docker secret ls
ID NAME CREATED UPDATED
dg426haahpi5ezmkkj5kyl3sn my_secret 7 seconds ago 7 seconds ago
```
### Create a secret with labels
```bash
$ docker secret create --label env=dev \
--label rev=20170324 \
my_secret ./secret.json
eo7jnzguqgtpdah3cm5srfb97
```
```none
$ docker secret inspect my_secret
[
{
"ID": "eo7jnzguqgtpdah3cm5srfb97",
"Version": {
"Index": 17
},
"CreatedAt": "2017-03-24T08:15:09.735271783Z",
"UpdatedAt": "2017-03-24T08:15:09.735271783Z",
"Spec": {
"Name": "my_secret",
"Labels": {
"env": "dev",
"rev": "20170324"
}
}
}
]
```
## Related commands
* [secret inspect](secret_inspect.md)
* [secret ls](secret_ls.md)
* [secret rm](secret_rm.md)

@ -0,0 +1,95 @@
---
title: "secret inspect"
description: "The secret inspect command description and usage"
keywords: ["secret, inspect"]
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# secret inspect
```Markdown
Usage: docker secret inspect [OPTIONS] SECRET [SECRET...]
Display detailed information on one or more secrets
Options:
-f, --format string Format the output using the given Go template
--help Print usage
```
## Description
Inspects the specified secret. This command has to be run targeting a manager
node.
By default, this renders all results in a JSON array. If a format is specified,
the given template will be executed for each result.
Go's [text/template](http://golang.org/pkg/text/template/) package
describes all the details of the format.
For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/).
## Examples
### Inspect a secret by name or ID
You can inspect a secret either by its *name* or its *ID*.
For example, given the following secret:
```bash
$ docker secret ls
ID NAME CREATED UPDATED
eo7jnzguqgtpdah3cm5srfb97 my_secret 3 minutes ago 3 minutes ago
```
```none
$ docker secret inspect my_secret
[
{
"ID": "eo7jnzguqgtpdah3cm5srfb97",
"Version": {
"Index": 17
},
"CreatedAt": "2017-03-24T08:15:09.735271783Z",
"UpdatedAt": "2017-03-24T08:15:09.735271783Z",
"Spec": {
"Name": "my_secret",
"Labels": {
"env": "dev",
"rev": "20170324"
}
}
}
]
```
### Formatting
You can use the `--format` option to obtain specific information about a
secret. The following example command outputs the creation time of the
secret.
```bash
$ docker secret inspect --format='{{.CreatedAt}}' eo7jnzguqgtpdah3cm5srfb97
2017-03-24 08:15:09.735271783 +0000 UTC
```
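As a further illustrative example (reusing the secret ID and the `Spec` structure shown
above), a nested field can be extracted the same way:
```bash
$ docker secret inspect --format='{{.Spec.Name}}' eo7jnzguqgtpdah3cm5srfb97
my_secret
```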
## Related commands
* [secret create](secret_create.md)
* [secret ls](secret_ls.md)
* [secret rm](secret_rm.md)

@ -0,0 +1,157 @@
---
title: "secret ls"
description: "The secret ls command description and usage"
keywords: ["secret, ls"]
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# secret ls
```Markdown
Usage: docker secret ls [OPTIONS]
List secrets
Aliases:
ls, list
Options:
-f, --filter filter Filter output based on conditions provided
--format string Pretty-print secrets using a Go template
--help Print usage
-q, --quiet Only display IDs
```
## Description
Run this command on a manager node to list the secrets in the swarm.
For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/).
## Examples
```bash
$ docker secret ls
ID NAME CREATED UPDATED
6697bflskwj1998km1gnnjr38 q5s5570vtvnimefos1fyeo2u2 6 weeks ago 6 weeks ago
9u9hk4br2ej0wgngkga6rp4hq my_secret 5 weeks ago 5 weeks ago
mem02h8n73mybpgqjf0kfi1n0 test_secret 3 seconds ago 3 seconds ago
```
### Filtering
The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`)
The currently supported filters are:
* [id](secret_ls.md#id) (secret's ID)
* [label](secret_ls.md#label) (`label=<key>` or `label=<key>=<value>`)
* [name](secret_ls.md#name) (secret's name)
#### id
The `id` filter matches all or the prefix of a secret's ID.
```bash
$ docker secret ls -f "id=6697bflskwj1998km1gnnjr38"
ID NAME CREATED UPDATED
6697bflskwj1998km1gnnjr38 q5s5570vtvnimefos1fyeo2u2 6 weeks ago 6 weeks ago
```
#### label
The `label` filter matches secrets based on the presence of a `label` alone or
a `label` and a value.
The following filter matches all secrets with a `project` label regardless of
its value:
```bash
$ docker secret ls --filter label=project
ID NAME CREATED UPDATED
mem02h8n73mybpgqjf0kfi1n0 test_secret About an hour ago About an hour ago
```
The following filter matches only secrets with the `project` label set to the
`test` value.
```bash
$ docker secret ls --filter label=project=test
ID NAME CREATED UPDATED
mem02h8n73mybpgqjf0kfi1n0 test_secret About an hour ago About an hour ago
```
#### name
The `name` filter matches on all or the prefix of a secret's name.
The following filter matches secrets whose name starts with `test`.
```bash
$ docker secret ls --filter name=test_secret
ID NAME CREATED UPDATED
mem02h8n73mybpgqjf0kfi1n0 test_secret About an hour ago About an hour ago
```
### Format the output
The formatting option (`--format`) pretty prints secrets output
using a Go template.
Valid placeholders for the Go template are listed below:
| Placeholder | Description |
| ------------ | ------------------------------------------------------------------------------------ |
| `.ID` | Secret ID |
| `.Name` | Secret name |
| `.CreatedAt` | Time when the secret was created |
| `.UpdatedAt` | Time when the secret was updated |
| `.Labels` | All labels assigned to the secret |
| `.Label` | Value of a specific label for this secret. For example `{{.Label "secret.ssh.key"}}` |
When using the `--format` option, the `secret ls` command will either
output the data exactly as the template declares or, when using the
`table` directive, will include column headers as well.
The following example uses a template without headers and outputs the
`ID` and `Name` entries separated by a colon for all secrets:
```bash
$ docker secret ls --format "{{.ID}}: {{.Name}}"
77af4d6b9913: secret-1
b6fa739cedf5: secret-2
78a85c484f71: secret-3
```
To list all secrets with their name and created date in a table format you
can use:
```bash
$ docker secret ls --format "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}"
ID NAME CREATED
77af4d6b9913 secret-1 5 minutes ago
b6fa739cedf5 secret-2 3 hours ago
78a85c484f71 secret-3 10 days ago
```
## Related commands
* [secret create](secret_create.md)
* [secret inspect](secret_inspect.md)
* [secret rm](secret_rm.md)

@ -0,0 +1,54 @@
---
title: "secret rm"
description: "The secret rm command description and usage"
keywords: ["secret, rm"]
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# secret rm
```Markdown
Usage: docker secret rm SECRET [SECRET...]
Remove one or more secrets
Aliases:
rm, remove
Options:
--help Print usage
```
## Description
Removes the specified secrets from the swarm. This command has to be run
targeting a manager node.
For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/).
## Examples
This example removes a secret:
```bash
$ docker secret rm secret.json
sapth4csdo5b6wz2p5uimh5xg
```
> **Warning**: Unlike `docker rm`, this command does not ask for confirmation
> before removing a secret.
## Related commands
* [secret create](secret_create.md)
* [secret inspect](secret_inspect.md)
* [secret ls](secret_ls.md)

@ -0,0 +1,42 @@
---
title: "service"
description: "The service command description and usage"
keywords: "service"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# service
```markdown
Usage: docker service COMMAND
Manage services
Options:
--help Print usage
Commands:
create Create a new service
inspect Display detailed information on one or more services
logs Fetch the logs of a service or task
ls List services
ps List the tasks of one or more services
rm Remove one or more services
scale Scale one or multiple replicated services
update Update a service
Run 'docker service COMMAND --help' for more information on a command.
```
## Description
Manage services.

@ -0,0 +1,846 @@
---
title: "service create"
description: "The service create command description and usage"
keywords: "service, create"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# service create
```Markdown
Usage: docker service create [OPTIONS] IMAGE [COMMAND] [ARG...]
Create a new service
Options:
--constraint list Placement constraints
--container-label list Container labels
-d, --detach Exit immediately instead of waiting for the service to converge (default true)
--dns list Set custom DNS servers
--dns-option list Set DNS options
--dns-search list Set custom DNS search domains
--endpoint-mode string Endpoint mode (vip or dnsrr) (default "vip")
--entrypoint command Overwrite the default ENTRYPOINT of the image
-e, --env list Set environment variables
--env-file list Read in a file of environment variables
--group list Set one or more supplementary user groups for the container
--health-cmd string Command to run to check health
--health-interval duration Time between running the check (ms|s|m|h)
--health-retries int Consecutive failures needed to report unhealthy
--health-start-period duration Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)
--health-timeout duration Maximum time to allow one check to run (ms|s|m|h)
--help Print usage
--host list Set one or more custom host-to-IP mappings (host:ip)
--hostname string Container hostname
-l, --label list Service labels
--limit-cpu decimal Limit CPUs
--limit-memory bytes Limit Memory
--log-driver string Logging driver for service
--log-opt list Logging driver options
--mode string Service mode (replicated or global) (default "replicated")
--mount mount Attach a filesystem mount to the service
--name string Service name
--network list Network attachments
--no-healthcheck Disable any container-specified HEALTHCHECK
--placement-pref pref Add a placement preference
-p, --publish port Publish a port as a node port
-q, --quiet Suppress progress output
--read-only Mount the container's root filesystem as read only
--replicas uint Number of tasks
--reserve-cpu decimal Reserve CPUs
--reserve-memory bytes Reserve Memory
--restart-condition string Restart when condition is met ("none"|"on-failure"|"any") (default "any")
--restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) (default 5s)
--restart-max-attempts uint Maximum number of restarts before giving up
--restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h)
--rollback-delay duration Delay between task rollbacks (ns|us|ms|s|m|h) (default 0s)
--rollback-failure-action string Action on rollback failure ("pause"|"continue") (default "pause")
--rollback-max-failure-ratio float Failure rate to tolerate during a rollback (default 0)
--rollback-monitor duration Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) (default 5s)
--rollback-order string Rollback order ("start-first"|"stop-first") (default "stop-first")
--rollback-parallelism uint Maximum number of tasks rolled back simultaneously (0 to roll back all at once) (default 1)
--secret secret Specify secrets to expose to the service
--stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) (default 10s)
--stop-signal string Signal to stop the container
-t, --tty Allocate a pseudo-TTY
--update-delay duration Delay between updates (ns|us|ms|s|m|h) (default 0s)
--update-failure-action string Action on update failure ("pause"|"continue"|"rollback") (default "pause")
--update-max-failure-ratio float Failure rate to tolerate during an update (default 0)
--update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 5s)
--update-order string Update order ("start-first"|"stop-first") (default "stop-first")
--update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) (default 1)
-u, --user string Username or UID (format: <name|uid>[:<group|gid>])
--with-registry-auth Send registry authentication details to swarm agents
-w, --workdir string Working directory inside the container
```
## Description
Creates a service as described by the specified parameters. You must run this
command on a manager node.
## Examples
### Create a service
```bash
$ docker service create --name redis redis:3.0.6
dmu1ept4cxcfe8k8lhtux3ro3
$ docker service create --mode global --name redis2 redis:3.0.6
a8q9dasaafudfs8q8w32udass
$ docker service ls
ID NAME MODE REPLICAS IMAGE
dmu1ept4cxcf redis replicated 1/1 redis:3.0.6
a8q9dasaafud redis2 global 1/1 redis:3.0.6
```
### Create a service with 5 replica tasks (--replicas)
Use the `--replicas` flag to set the number of replica tasks for a replicated
service. The following command creates a `redis` service with `5` replica tasks:
```bash
$ docker service create --name redis --replicas=5 redis:3.0.6
4cdgfyky7ozwh3htjfw0d12qv
```
The above command sets the *desired* number of tasks for the service. Even
though the command returns immediately, actual scaling of the service may take
some time. The `REPLICAS` column shows both the *actual* and *desired* number
of replica tasks for the service.
In the following example the desired state is `5` replicas, but the current
number of `RUNNING` tasks is `3`:
```bash
$ docker service ls
ID NAME MODE REPLICAS IMAGE
4cdgfyky7ozw redis replicated 3/5 redis:3.0.7
```
Once all the tasks are created and `RUNNING`, the actual number of tasks is
equal to the desired number:
```bash
$ docker service ls
ID NAME MODE REPLICAS IMAGE
4cdgfyky7ozw redis replicated 5/5 redis:3.0.7
```
### Create a service with secrets
Use the `--secret` flag to give a container access to a
[secret](secret_create.md).
Create a service specifying a secret:
```bash
$ docker service create --name redis --secret secret.json redis:3.0.6
4cdgfyky7ozwh3htjfw0d12qv
```
Create a service specifying the secret, target, user/group ID and mode:
```bash
$ docker service create --name redis \
--secret source=ssh-key,target=ssh \
--secret source=app-key,target=app,uid=1000,gid=1001,mode=0400 \
redis:3.0.6
4cdgfyky7ozwh3htjfw0d12qv
```
Secrets are located in `/run/secrets` in the container. If no target is
specified, the name of the secret is used as the name of the in-memory file in
the container. If a target is specified, it is used as the filename. In the
example above, two files are created: `/run/secrets/ssh` and
`/run/secrets/app`, one for each of the secret targets specified.
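As a quick check (a sketch; the task container must be running on the node where
you run the command, and the `redis` name filter simply matches the service name
used above), you can list the mounted secret files from inside one of the
service's task containers:
```bash
# Find a local task container for the service and list its secret files.
# The exact container ID will differ on your host.
$ docker exec "$(docker ps -q --filter name=redis | head -n 1)" ls /run/secrets
```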
### Create a service with a rolling update policy
```bash
$ docker service create \
--replicas 10 \
--name redis \
--update-delay 10s \
--update-parallelism 2 \
redis:3.0.6
```
When you run a [service update](service_update.md), the scheduler updates a
maximum of 2 tasks at a time, with `10s` between updates. For more information,
refer to the [rolling updates
tutorial](https://docs.docker.com/engine/swarm/swarm-tutorial/rolling-update/).
### Set environment variables (-e, --env)
This sets environment variables for all tasks in a service. For example:
```bash
$ docker service create --name redis_2 --replicas 5 --env MYVAR=foo redis:3.0.6
```
### Create a service with specific hostname (--hostname)
This option sets the hostname of the service's containers to a specific string.
For example:
```bash
$ docker service create --name redis --hostname myredis redis:3.0.6
```
### Set metadata on a service (-l, --label)
A label is a `key=value` pair that applies metadata to a service. To label a
service with two labels:
```bash
$ docker service create \
--name redis_2 \
  --label com.example.foo="bar" \
--label bar=baz \
redis:3.0.6
```
For more information about labels, refer to [apply custom
metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/).
### Add bind-mounts or volumes
Docker supports two different kinds of mounts, which allow containers to read
from or write to files or directories on the host operating
system. These types are _data volumes_ (often referred to simply as volumes) and
_bind-mounts_.
Additionally, Docker supports `tmpfs` mounts.
A **bind-mount** makes a file or directory on the host available to the
container it is mounted within. A bind-mount may be either read-only or
read-write. For example, a container might share its host's DNS information by
means of a bind-mount of the host's `/etc/resolv.conf` or a container might
write logs to its host's `/var/log/myContainerLogs` directory. If you use
bind-mounts and your host and containers have different notions of permissions,
access controls, or other such details, you will run into portability issues.
A **named volume** is a mechanism for decoupling persistent data needed by your
container from the image used to create the container and from the host machine.
Named volumes are created and managed by Docker, and a named volume persists
even when no container is currently using it. Data in named volumes can be
shared between a container and the host machine, as well as between multiple
containers. Docker uses a _volume driver_ to create, manage, and mount volumes.
You can back up or restore volumes using Docker commands.
A **tmpfs** mounts a tmpfs inside a container for volatile data.
Consider a situation where your image starts a lightweight web server. You could
use that image as a base image, copy in your website's HTML files, and package
that into another image. Each time your website changed, you'd need to update
the new image and redeploy all of the containers serving your website. A better
solution is to store the website in a named volume which is attached to each of
your web server containers when they start. To update the website, you just
update the named volume.
For more information about named volumes, see
[Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/).
The following table describes options which apply to both bind-mounts and named
volumes in a service:
<table>
<tr>
<th>Option</th>
<th>Required</th>
<th>Description</th>
</tr>
<tr>
<td><b>type</b></td>
<td></td>
<td>
<p>The type of mount; can be either <tt>volume</tt>, <tt>bind</tt>, or <tt>tmpfs</tt>. Defaults to <tt>volume</tt> if no type is specified.
<ul>
<li><tt>volume</tt>: mounts a <a href="volume_create.md">managed volume</a> into the container.</li>
<li><tt>bind</tt>: bind-mounts a directory or file from the host into the container.</li>
<li><tt>tmpfs</tt>: mounts a tmpfs in the container.</li>
</ul></p>
</td>
</tr>
<tr>
<td><b>src</b> or <b>source</b></td>
<td>for <tt>type=bind</tt> only</td>
<td>
<ul>
<li>
<tt>type=volume</tt>: <tt>src</tt> is an optional way to specify the name of the volume (for example, <tt>src=my-volume</tt>).
If the named volume does not exist, it is automatically created. If no <tt>src</tt> is specified, the volume is
assigned a random name which is guaranteed to be unique on the host, but may not be unique cluster-wide.
A randomly-named volume has the same lifecycle as its container and is destroyed when the <i>container</i>
is destroyed (which is upon <tt>service update</tt>, or when scaling or re-balancing the service)
</li>
<li>
<tt>type=bind</tt>: <tt>src</tt> is required, and specifies an absolute path to the file or directory to bind-mount
(for example, <tt>src=/path/on/host/</tt>). An error is produced if the file or directory does not exist.
</li>
<li>
<tt>type=tmpfs</tt>: <tt>src</tt> is not supported.
</li>
</ul>
</td>
</tr>
<tr>
<td><p><b>dst</b> or <b>destination</b> or <b>target</b></p></td>
<td>yes</td>
<td>
<p>Mount path inside the container, for example <tt>/some/path/in/container/</tt>.
If the path does not exist in the container's filesystem, the Engine creates
a directory at the specified location before mounting the volume or bind-mount.</p>
</td>
</tr>
<tr>
<td><p><b>readonly</b> or <b>ro</b></p></td>
<td></td>
<td>
<p>The Engine mounts binds and volumes <tt>read-write</tt> unless the <tt>readonly</tt> option
is given when mounting the bind or volume.
<ul>
<li><tt>true</tt> or <tt>1</tt> or no value: Mounts the bind or volume read-only.</li>
<li><tt>false</tt> or <tt>0</tt>: Mounts the bind or volume read-write.</li>
</ul></p>
</td>
</tr>
<tr>
<td><b>consistency</b></td>
<td></td>
<td>
<p>The consistency requirements for the mount; one of
<ul>
<li><tt>default</tt>: Equivalent to <tt>consistent</tt>.</li>
<li><tt>consistent</tt>: Full consistency. The container runtime and the host maintain an identical view of the mount at all times.</li>
<li><tt>cached</tt>: The host's view of the mount is authoritative. There may be delays before updates made on the host are visible within a container.</li>
<li><tt>delegated</tt>: The container runtime's view of the mount is authoritative. There may be delays before updates made in a container are visible on the host.</li>
</ul>
</p>
</td>
</tr>
</table>
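The options above are combined in a single `--mount` flag. As a sketch (the host
path and service name are illustrative), the following bind-mounts a host file
read-only into the service's containers:
```bash
$ docker service create \
  --name my-nginx \
  --mount type=bind,src=/etc/nginx/nginx.conf,dst=/etc/nginx/nginx.conf,readonly \
  nginx:alpine
```
Because this is a bind-mount, the source file must already exist on every node
where a task may be scheduled.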
#### Bind Propagation
Bind propagation refers to whether or not mounts created within a given
bind-mount or named volume can be propagated to replicas of that mount. Consider
a mount point `/mnt`, which is also mounted on `/tmp`. The propagation settings
control whether a mount on `/tmp/a` would also be available on `/mnt/a`. Each
propagation setting has a recursive counterpoint. In the case of recursion,
consider that `/tmp/a` is also mounted as `/foo`. The propagation settings
control whether `/mnt/a` and/or `/tmp/a` would exist.
The `bind-propagation` option defaults to `rprivate` for both bind-mounts and
volume mounts, and is only configurable for bind-mounts. In other words, named
volumes do not support bind propagation.
- **`shared`**: Sub-mounts of the original mount are exposed to replica mounts,
and sub-mounts of replica mounts are also propagated to the
original mount.
- **`slave`**: similar to a shared mount, but only in one direction. If the
original mount exposes a sub-mount, the replica mount can see it.
However, if the replica mount exposes a sub-mount, the original
mount cannot see it.
- **`private`**: The mount is private. Sub-mounts within it are not exposed to
replica mounts, and sub-mounts of replica mounts are not
exposed to the original mount.
- **`rshared`**: The same as shared, but the propagation also extends to and from
mount points nested within any of the original or replica mount
points.
- **`rslave`**: The same as `slave`, but the propagation also extends to and from
mount points nested within any of the original or replica mount
points.
- **`rprivate`**: The default. The same as `private`, meaning that no mount points
anywhere within the original or replica mount points propagate
in either direction.
For more information about bind propagation, see the
[Linux kernel documentation for shared subtree](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
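The propagation mode is set with the `bind-propagation` key inside `--mount`.
As a sketch (the paths and service name are illustrative):
```bash
$ docker service create \
  --name my-service \
  --mount type=bind,src=/mnt,dst=/mnt,bind-propagation=rslave \
  nginx:alpine
```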
#### Options for Named Volumes
The following options can only be used for named volumes (`type=volume`):
<table>
<tr>
<th>Option</th>
<th>Description</th>
</tr>
<tr>
<td><b>volume-driver</b></td>
<td>
<p>Name of the volume-driver plugin to use for the volume. Defaults to
<tt>"local"</tt>, to use the local volume driver to create the volume if the
volume does not exist.</p>
</td>
</tr>
<tr>
<td><b>volume-label</b></td>
<td>
One or more custom metadata ("labels") to apply to the volume upon
creation. For example,
<tt>volume-label=mylabel=hello-world,my-other-label=hello-mars</tt>. For more
information about labels, refer to
<a href="https://docs.docker.com/engine/userguide/labels-custom-metadata/">apply custom metadata</a>.
</td>
</tr>
<tr>
<td><b>volume-nocopy</b></td>
<td>
By default, if you attach an empty volume to a container, and files or
directories already existed at the mount-path in the container (<tt>dst</tt>),
the Engine copies those files and directories into the volume, allowing
the host to access them. Set <tt>volume-nocopy</tt> to disable copying files
from the container's filesystem to the volume and mount the empty volume instead.
A value is optional:
<ul>
<li><tt>true</tt> or <tt>1</tt>: Default if you do not provide a value. Disables copying.</li>
<li><tt>false</tt> or <tt>0</tt>: Enables copying.</li>
</ul>
</td>
</tr>
<tr>
<td><b>volume-opt</b></td>
<td>
Options specific to a given volume driver, which will be passed to the
driver when creating the volume. Options are provided as a comma-separated
list of key/value pairs, for example,
<tt>volume-opt=some-option=some-value,volume-opt=some-other-option=some-other-value</tt>.
For available options for a given driver, refer to that driver's
documentation.
</td>
</tr>
</table>
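Combining these options, the following sketch creates a service whose volume is
managed by the default `local` driver, carries a label, and skips the initial
copy (the volume, mount path, and label names are illustrative; `volume-opt`
keys, if you add any, must be ones your driver actually supports):
```bash
$ docker service create \
  --name my-service \
  --mount 'type=volume,src=my-volume,dst=/data,volume-driver=local,volume-label=color=red,volume-nocopy=true' \
  nginx:alpine
```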
#### Options for tmpfs
The following options can only be used for tmpfs mounts (`type=tmpfs`):
<table>
<tr>
<th>Option</th>
<th>Description</th>
</tr>
<tr>
<td><b>tmpfs-size</b></td>
<td>Size of the tmpfs mount in bytes. Unlimited by default in Linux.</td>
</tr>
<tr>
<td><b>tmpfs-mode</b></td>
<td>File mode of the tmpfs in octal. (e.g. <tt>"700"</tt> or <tt>"0700"</tt>.) Defaults to <tt>"1777"</tt> in Linux.</td>
</tr>
</table>
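For example, the following sketch (the mount path and service name are
illustrative) mounts a 100 MB tmpfs with restricted permissions:
```bash
$ docker service create \
  --name my-service \
  --mount type=tmpfs,dst=/app/cache,tmpfs-size=104857600,tmpfs-mode=1770 \
  nginx:alpine
```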
#### Differences between "--mount" and "--volume"
The `--mount` flag supports most options that are supported by the `-v`
or `--volume` flag for `docker run`, with some important exceptions:
- The `--mount` flag allows you to specify a volume driver and volume driver
options *per volume*, without creating the volumes in advance. In contrast,
`docker run` allows you to specify a single volume driver which is shared
by all volumes, using the `--volume-driver` flag.
- The `--mount` flag allows you to specify custom metadata ("labels") for a volume,
before the volume is created.
- When you use `--mount` with `type=bind`, the host-path must refer to an *existing*
path on the host. The path will not be created for you and the service will fail
with an error if the path does not exist.
- The `--mount` flag does not allow you to relabel a volume with `Z` or `z` flags,
which are used for `selinux` labeling.
#### Create a service using a named volume
The following example creates a service that uses a named volume:
```bash
$ docker service create \
--name my-service \
--replicas 3 \
--mount type=volume,source=my-volume,destination=/path/in/container,volume-label="color=red",volume-label="shape=round" \
nginx:alpine
```
For each replica of the service, the engine requests a volume named "my-volume"
from the default ("local") volume driver where the task is deployed. If the
volume does not exist, the engine creates a new volume and applies the "color"
and "shape" labels.
When the task is started, the volume is mounted on `/path/in/container/` inside
the container.
Be aware that the default ("local") volume driver is locally scoped.
This means that depending on where a task is deployed, either that task gets a
*new* volume named "my-volume", or shares the same "my-volume" with other tasks
of the same service. Multiple containers writing to a single shared volume can
cause data corruption if the software running inside the container is not
designed to handle concurrent processes writing to the same location. Also take
into account that containers can be re-scheduled by the Swarm orchestrator and
be deployed on a different node.
#### Create a service that uses an anonymous volume
The following command creates a service with three replicas with an anonymous
volume on `/path/in/container`:
```bash
$ docker service create \
--name my-service \
--replicas 3 \
--mount type=volume,destination=/path/in/container \
nginx:alpine
```
In this example, no name (`source`) is specified for the volume, so a new volume
is created for each task. This guarantees that each task gets its own volume,
and volumes are not shared between tasks. Anonymous volumes are removed after
the task using them is complete.
#### Create a service that uses a bind-mounted host directory
The following example bind-mounts a host directory at `/path/in/container` in
the containers backing the service:
```bash
$ docker service create \
--name my-service \
--mount type=bind,source=/path/on/host,destination=/path/in/container \
nginx:alpine
```
### Set service mode (--mode)
The service mode determines whether this is a _replicated_ service or a _global_
service. A replicated service runs as many tasks as specified, while a global
service runs on each active node in the swarm.
The following command creates a global service:
```bash
$ docker service create \
--name redis_2 \
--mode global \
redis:3.0.6
```
### Specify service constraints (--constraint)
You can limit the set of nodes where a task can be scheduled by defining
constraint expressions. Multiple constraints find nodes that satisfy every
expression (AND match). Constraints can match node or Docker Engine labels as
follows:
<table>
<tr>
<th>node attribute</th>
<th>matches</th>
<th>example</th>
</tr>
<tr>
<td><tt>node.id</tt></td>
<td>Node ID</td>
<td><tt>node.id == 2ivku8v2gvtg4</tt></td>
</tr>
<tr>
<td><tt>node.hostname</tt></td>
<td>Node hostname</td>
<td><tt>node.hostname != node-2</tt></td>
</tr>
<tr>
<td><tt>node.role</tt></td>
<td>Node role</td>
<td><tt>node.role == manager</tt></td>
</tr>
<tr>
<td><tt>node.labels</tt></td>
<td>user defined node labels</td>
<td><tt>node.labels.security == high</tt></td>
</tr>
<tr>
<td><tt>engine.labels</tt></td>
<td>Docker Engine's labels</td>
<td><tt>engine.labels.operatingsystem == ubuntu 14.04</tt></td>
</tr>
</table>
`engine.labels` apply to Docker Engine labels like operating system,
drivers, etc. Swarm administrators add `node.labels` for operational purposes by
using the [`docker node update`](node_update.md) command.
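As a sketch (the node name `worker-node-1` is illustrative), an administrator could add such a label with:
```bash
$ docker node update --label-add type=queue worker-node-1
```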
For example, the following limits tasks for the `redis` service to nodes where the
node label `type` equals `queue`:
```bash
$ docker service create \
--name redis_2 \
--constraint 'node.labels.type == queue' \
redis:3.0.6
```
### Specify service placement preferences (--placement-pref)
You can set up the service to divide tasks evenly over different categories of
nodes. One example of where this can be useful is to balance tasks over a set
of datacenters or availability zones. The example below illustrates this:
```bash
$ docker service create \
--replicas 9 \
--name redis_2 \
--placement-pref 'spread=node.labels.datacenter' \
redis:3.0.6
```
This uses `--placement-pref` with a `spread` strategy (currently the only
supported strategy) to spread tasks evenly over the values of the `datacenter`
node label. In this example, we assume that every node has a `datacenter` node
label attached to it. If there are three different values of this label among
nodes in the swarm, one third of the tasks will be placed on the nodes
associated with each value. This is true even if there are more nodes with one
value than another. For example, consider the following set of nodes:
- Three nodes with `node.labels.datacenter=east`
- Two nodes with `node.labels.datacenter=south`
- One node with `node.labels.datacenter=west`
Since we are spreading over the values of the `datacenter` label and the
service has 9 replicas, 3 replicas will end up in each datacenter. There are
three nodes associated with the value `east`, so each one will get one of the
three replicas reserved for this value. There are two nodes with the value
`south`, and the three replicas for this value will be divided between them,
with one receiving two replicas and another receiving just one. Finally, `west`
has a single node that will get all three replicas reserved for `west`.
If the nodes in one category (for example, those with
`node.labels.datacenter=south`) can't handle their fair share of tasks due to
constraints or resource limitations, the extra tasks will be assigned to other
nodes instead, if possible.
Both engine labels and node labels are supported by placement preferences. The
example above uses a node label, because the label is referenced with
`node.labels.datacenter`. To spread over the values of an engine label, use
`--placement-pref spread=engine.labels.<labelname>`.
It is possible to add multiple placement preferences to a service. This
establishes a hierarchy of preferences, so that tasks are first divided over
one category, and then further divided over additional categories. One example
of where this may be useful is dividing tasks fairly between datacenters, and
then splitting the tasks within each datacenter over a choice of racks. To add
multiple placement preferences, specify the `--placement-pref` flag multiple
times. The order is significant, and the placement preferences will be applied
in the order given when making scheduling decisions.
The following example sets up a service with multiple placement preferences.
Tasks are spread first over the various datacenters, and then over racks
(as indicated by the respective labels):
```bash
$ docker service create \
--replicas 9 \
--name redis_2 \
--placement-pref 'spread=node.labels.datacenter' \
--placement-pref 'spread=node.labels.rack' \
redis:3.0.6
```
When updating a service with `docker service update`, `--placement-pref-add`
appends a new placement preference after all existing placement preferences.
`--placement-pref-rm` removes an existing placement preference that matches the
argument.
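As a sketch (reusing the `redis_2` service from above), adding and later removing
a preference on an existing service looks like this:
```bash
$ docker service update \
  --placement-pref-add 'spread=node.labels.rack' \
  redis_2

$ docker service update \
  --placement-pref-rm 'spread=node.labels.rack' \
  redis_2
```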
### Attach a service to an existing network (--network)
You can use overlay networks to connect one or more services within the swarm.
First, create an overlay network on a manager node using the `docker network create`
command:
```bash
$ docker network create --driver overlay my-network
etjpu59cykrptrgw0z0hk5snf
```
After you create an overlay network in swarm mode, all manager nodes have
access to the network.
When you create the service, pass the `--network` flag to attach it to the
overlay network:
```bash
$ docker service create \
--replicas 3 \
--network my-network \
--name my-web \
nginx
716thylsndqma81j6kkkb5aus
```
The swarm extends `my-network` to each node running the service.
Containers on the same network can access each other using
[service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery).
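As a sketch (the service name `my-client` is illustrative), a second service
attached to the same network can reach `my-web` by its service name:
```bash
$ docker service create \
  --name my-client \
  --network my-network \
  alpine top
```
From inside a `my-client` task, the name `my-web` resolves via swarm service
discovery, so a command such as `wget -qO- http://my-web` reaches the web service.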
### Publish service ports externally to the swarm (-p, --publish)
You can publish service ports to make them available externally to the swarm
using the `--publish` flag:
```bash
$ docker service create --publish <TARGET-PORT>:<SERVICE-PORT> nginx
```
For example:
```bash
$ docker service create --name my_web --replicas 3 --publish 8080:80 nginx
```
When you publish a service port, the swarm routing mesh makes the service
accessible at the target port on every node, regardless of whether there is a
task for the service running on the node. For more information, refer to
[Use swarm mode routing mesh](https://docs.docker.com/engine/swarm/ingress/).
### Publish a port for TCP only or UDP only
By default, when you publish a port, it is a TCP port. You can
specifically publish a UDP port instead of or in addition to a TCP port. When
you publish both TCP and UDP ports, Docker 1.12.2 and earlier require you to
add the suffix `/tcp` for TCP ports. Otherwise it is optional.
#### TCP only
The following two commands are equivalent.
```bash
$ docker service create --name dns-cache -p 53:53 dns-cache
$ docker service create --name dns-cache -p 53:53/tcp dns-cache
```
#### TCP and UDP
```bash
$ docker service create --name dns-cache -p 53:53/tcp -p 53:53/udp dns-cache
```
#### UDP only
```bash
$ docker service create --name dns-cache -p 53:53/udp dns-cache
```
### Create services using templates
You can use templates for some flags of `service create`, using the syntax
provided by Go's [text/template](http://golang.org/pkg/text/template/) package.
The supported flags are the following:
- `--hostname`
- `--mount`
- `--env`
Valid placeholders for the Go template are listed below:
<table>
<tr>
<th>Placeholder</th>
<th>Description</th>
</tr>
<tr>
<td><tt>.Service.ID</tt></td>
<td>Service ID</td>
</tr>
<tr>
<td><tt>.Service.Name</tt></td>
<td>Service name</td>
</tr>
<tr>
<td><tt>.Service.Labels</tt></td>
<td>Service labels</td>
</tr>
<tr>
<td><tt>.Node.ID</tt></td>
<td>Node ID</td>
</tr>
<tr>
<td><tt>.Task.ID</tt></td>
<td>Task ID</td>
</tr>
<tr>
<td><tt>.Task.Name</tt></td>
<td>Task name</td>
</tr>
<tr>
<td><tt>.Task.Slot</tt></td>
<td>Task slot</td>
</tr>
</table>
#### Template example
In this example, the hostname of the created containers is templated from the
service's name and the ID of the node where each container runs.
```bash
$ docker service create --name hosttempl \
--hostname="{{.Node.ID}}-{{.Service.Name}}"\
busybox top
va8ew30grofhjoychbr6iot8c
$ docker service ps va8ew30grofhjoychbr6iot8c
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
wo41w8hg8qan hosttempl.1 busybox:latest@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 2e7a8a9c4da2 Running Running about a minute ago
$ docker inspect --format="{{.Config.Hostname}}" hosttempl.1.wo41w8hg8qanxwjwsg4kxpprj
x3ti0erg11rjpg64m75kej2mz-hosttempl
```
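The same templating works for the other supported flags. As a sketch, `--env` can
embed the node ID into each task's environment (the service and variable names
are illustrative):
```bash
$ docker service create --name envtempl \
  --env "NODE_ID={{.Node.ID}}" \
  busybox top
```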
## Related commands
* [service inspect](service_inspect.md)
* [service logs](service_logs.md)
* [service ls](service_ls.md)
* [service rm](service_rm.md)
* [service scale](service_scale.md)
* [service ps](service_ps.md)
* [service update](service_update.md)
<style>table tr > td:first-child { white-space: nowrap;}</style>

@ -0,0 +1,170 @@
---
title: "service inspect"
description: "The service inspect command description and usage"
keywords: "service, inspect"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# service inspect
```Markdown
Usage: docker service inspect [OPTIONS] SERVICE [SERVICE...]
Display detailed information on one or more services
Options:
-f, --format string Format the output using the given Go template
--help Print usage
--pretty Print the information in a human friendly format
```
## Description
Inspects the specified service. You must run this command on a manager
node.
By default, this renders all results in a JSON array. If a format is specified,
the given template will be executed for each result.
Go's [text/template](http://golang.org/pkg/text/template/) package
describes all the details of the format.
## Examples
### Inspect a service by name or ID
You can inspect a service either by its *name* or its *ID*.
For example, given the following service:
```bash
$ docker service ls
ID NAME MODE REPLICAS IMAGE
dmu1ept4cxcf redis replicated 3/3 redis:3.0.6
```
Both `docker service inspect redis`, and `docker service inspect dmu1ept4cxcf`
produce the same result:
```none
$ docker service inspect redis
[
{
"ID": "dmu1ept4cxcfe8k8lhtux3ro3",
"Version": {
"Index": 12
},
"CreatedAt": "2016-06-17T18:44:02.558012087Z",
"UpdatedAt": "2016-06-17T18:44:02.558012087Z",
"Spec": {
"Name": "redis",
"TaskTemplate": {
"ContainerSpec": {
"Image": "redis:3.0.6"
},
"Resources": {
"Limits": {},
"Reservations": {}
},
"RestartPolicy": {
"Condition": "any",
"MaxAttempts": 0
},
"Placement": {}
},
"Mode": {
"Replicated": {
"Replicas": 1
}
},
"UpdateConfig": {},
"EndpointSpec": {
"Mode": "vip"
}
},
"Endpoint": {
"Spec": {}
}
}
]
```
```bash
$ docker service inspect dmu1ept4cxcf
[
{
"ID": "dmu1ept4cxcfe8k8lhtux3ro3",
"Version": {
"Index": 12
},
...
}
]
```
### Formatting
You can print the inspect output in a human-readable format instead of the default
JSON output, by using the `--pretty` option:
```bash
$ docker service inspect --pretty frontend
ID: c8wgl7q4ndfd52ni6qftkvnnp
Name: frontend
Labels:
- org.example.projectname=demo-app
Service Mode: REPLICATED
Replicas: 5
Placement:
UpdateConfig:
Parallelism: 0
On failure: pause
Max failure ratio: 0
ContainerSpec:
Image: nginx:alpine
Resources:
Networks: net1
Endpoint Mode: vip
Ports:
PublishedPort = 4443
Protocol = tcp
TargetPort = 443
PublishMode = ingress
```
You can also use `--format pretty` for the same effect.
#### Find the number of tasks running as part of a service
The `--format` option can be used to obtain specific information about a
service. For example, the following command outputs the number of replicas
of the "redis" service.
```bash
$ docker service inspect --format='{{.Spec.Mode.Replicated.Replicas}}' redis
10
```
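The same approach works for any field in the JSON shown above. For example, the
following sketch prints the image the service was created with:
```bash
$ docker service inspect --format='{{.Spec.TaskTemplate.ContainerSpec.Image}}' redis
```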
## Related commands
* [service create](service_create.md)
* [service logs](service_logs.md)
* [service ls](service_ls.md)
* [service rm](service_rm.md)
* [service scale](service_scale.md)
* [service ps](service_ps.md)
* [service update](service_update.md)
