Initial Go Port (logs events from swarm) (#8)
* Initial Go Port (logs events from swarm) * Initial Plugin interface * Integrate with latest msgbus changes * Implement docker proxy endpoint * Fixed docker image and default Docker URL * Refactor docker client wrapper * Better version of proxy supporting UNIX and HTTP/HTTPS Docker API endpoints * Fixed UNIXTransport * Choose between https/http for Docker API endpoint proxying based on known ports * Refactor build infra * Add Travis config and badges * Vendor packages * Fixed exposed port in Dockerfile * Add Docker Stackfile * Updated 3rd-party vendored packages * Working e2e golang portpull/10/head
parent
c5db620d24
commit
d98d305860
52 changed files with 1231 additions and 927 deletions
@ -0,0 +1,45 @@ |
||||
[submodule "vendor/github.com/Sirupsen/logrus"] |
||||
path = vendor/github.com/Sirupsen/logrus |
||||
url = https://github.com/Sirupsen/logrus |
||||
[submodule "vendor/golang.org/x/crypto"] |
||||
path = vendor/golang.org/x/crypto |
||||
url = https://go.googlesource.com/crypto |
||||
[submodule "vendor/golang.org/x/sys"] |
||||
path = vendor/golang.org/x/sys |
||||
url = https://go.googlesource.com/sys |
||||
[submodule "vendor/github.com/docker/docker"] |
||||
path = vendor/github.com/docker/docker |
||||
url = https://github.com/docker/docker |
||||
[submodule "vendor/github.com/namsral/flag"] |
||||
path = vendor/github.com/namsral/flag |
||||
url = https://github.com/namsral/flag |
||||
[submodule "vendor/github.com/stretchr/testify"] |
||||
path = vendor/github.com/stretchr/testify |
||||
url = https://github.com/stretchr/testify |
||||
[submodule "vendor/github.com/prologic/msgbus"] |
||||
path = vendor/github.com/prologic/msgbus |
||||
url = https://github.com/prologic/msgbus |
||||
[submodule "vendor/golang.org/x/net"] |
||||
path = vendor/golang.org/x/net |
||||
url = https://go.googlesource.com/net |
||||
[submodule "vendor/github.com/prometheus/client_golang"] |
||||
path = vendor/github.com/prometheus/client_golang |
||||
url = https://github.com/prometheus/client_golang |
||||
[submodule "vendor/github.com/beorn7/perks"] |
||||
path = vendor/github.com/beorn7/perks |
||||
url = https://github.com/beorn7/perks |
||||
[submodule "vendor/github.com/golang/protobuf"] |
||||
path = vendor/github.com/golang/protobuf |
||||
url = https://github.com/golang/protobuf |
||||
[submodule "vendor/github.com/prometheus/client_model"] |
||||
path = vendor/github.com/prometheus/client_model |
||||
url = https://github.com/prometheus/client_model |
||||
[submodule "vendor/github.com/prometheus/common"] |
||||
path = vendor/github.com/prometheus/common |
||||
url = https://github.com/prometheus/common |
||||
[submodule "vendor/github.com/matttproud/golang_protobuf_extensions"] |
||||
path = vendor/github.com/matttproud/golang_protobuf_extensions |
||||
url = https://github.com/matttproud/golang_protobuf_extensions |
||||
[submodule "vendor/github.com/prometheus/procfs"] |
||||
path = vendor/github.com/prometheus/procfs |
||||
url = https://github.com/prometheus/procfs |
@ -0,0 +1,8 @@ |
||||
language: go |
||||
sudo: false |
||||
go: |
||||
- tip |
||||
script: |
||||
- make test |
||||
after_success: |
||||
- bash <(curl -s https://codecov.io/bash) |
@ -1,16 +1,30 @@ |
||||
FROM prologic/python-runtime:2.7 |
||||
# Build |
||||
FROM golang:alpine AS build |
||||
|
||||
EXPOSE 1338/udp 1338/tcp |
||||
ARG TAG |
||||
ARG BUILD |
||||
|
||||
ENTRYPOINT ["autodock"] |
||||
CMD [] |
||||
ENV APP autodock |
||||
ENV REPO prologic/$APP |
||||
|
||||
RUN apk -U add git && \ |
||||
RUN apk add --update git make build-base && \ |
||||
rm -rf /var/cache/apk/* |
||||
|
||||
COPY requirements.txt /tmp/requirements.txt |
||||
RUN pip install -r /tmp/requirements.txt && rm /tmp/requirements.txt |
||||
WORKDIR /go/src/github.com/$REPO |
||||
COPY . /go/src/github.com/$REPO |
||||
RUN make TAG=$TAG BUILD=$BUILD build |
||||
|
||||
# Runtime |
||||
FROM scratch |
||||
|
||||
ENV APP autodock |
||||
ENV REPO prologic/$APP |
||||
|
||||
LABEL autodock.app main |
||||
|
||||
WORKDIR /app |
||||
COPY . /app/ |
||||
RUN pip install . |
||||
COPY --from=build /go/src/github.com/${REPO}/cmd/${APP}/${APP} /${APP} |
||||
|
||||
EXPOSE 8000/tcp |
||||
|
||||
ENTRYPOINT ["/autodock"] |
||||
CMD [] |
||||
|
@ -0,0 +1,32 @@ |
||||
.PHONY: dev build image test deps clean |
||||
|
||||
CGO_ENABLED=0
|
||||
COMMIT=`git rev-parse --short HEAD`
|
||||
APP=autodock
|
||||
REPO?=prologic/$(APP)
|
||||
TAG?=latest
|
||||
BUILD?=-dev
|
||||
|
||||
all: dev |
||||
|
||||
dev: build |
||||
@./cmd/$(APP)/$(APP) -debug
|
||||
|
||||
deps: |
||||
@go get ./...
|
||||
|
||||
build: clean deps |
||||
@echo " -> Building $(TAG)$(BUILD)"
|
||||
@cd cmd/$(APP) && go build -tags "netgo static_build" -installsuffix netgo \
|
||||
-ldflags "-w -X github.com/$(REPO)/version.GitCommit=$(COMMIT) -X github.com/$(REPO)/version.Build=$(BUILD)" .
|
||||
@echo "Built $$(./cmd/$(APP)/$(APP) -v)"
|
||||
|
||||
image: |
||||
@docker build --build-arg TAG=$(TAG) --build-arg BUILD=$(BUILD) -t $(REPO):$(TAG) .
|
||||
@echo "Image created: $(REPO):$(TAG)"
|
||||
|
||||
test: |
||||
@go test -v -cover -race $(TEST_ARGS)
|
||||
|
||||
clean: |
||||
@rm -rf $(APP)
|
@ -1,106 +1,56 @@ |
||||
autodock |
||||
======== |
||||
# autodock |
||||
|
||||
[](https://travis-ci.org/prologic/autodock) |
||||
[](https://codecov.io/gh/prologic/autodock) |
||||
[](https://goreportcard.com/report/github.com/prologic/autodock) |
||||
[](https://imagelayers.io/?images=prologic/autodock:latest) |
||||
[](https://godoc.org/github.com/prologic/autodock) |
||||
|
||||
[autodock](https://github.com/prologic/autodock) is a Daemon for Docker Automation. |
||||
[autodock](https://github.com/prologic/autodock) is a Daemon for |
||||
Docker Automation which enables you to maintain and automate your Docker |
||||
infrastructure by reacting to Docker or Docker Swarm events. |
||||
|
||||
autodock is MIT licensed. |
||||
## Supported plugins: |
||||
|
||||
Installation |
||||
------------ |
||||
autodock comes with a number of plugins where each piece of functionality is |
||||
provided by a separate plugin. Each plugin is "linked" to autodock to receive |
||||
Docker events and issue new Docker API commands. |
||||
|
||||
Either pull the automatically updated [Docker](http://docker.com/) image: |
||||
The following is a list of the currently available plugins: |
||||
|
||||
$ docker pull prologic/autodock |
||||
- [autodock-cron](https://github.com/prologic/autodock-cron) |
||||
Provides a *Cron* like scheduler for Containers/Services |
||||
- [autodock-logger](https://github.com/prologic/autodock-logger) |
||||
Logs Docker Events |
||||
|
||||
Or install from the development repository: |
||||
## Installation |
||||
|
||||
$ git clone https://github.com/prologic/autodock.git |
||||
$ cd autodock |
||||
$ pip install -r requirements.txt |
||||
### Docker |
||||
|
||||
Plugins |
||||
------- |
||||
```#!bash |
||||
$ docker pull prologic/autodock |
||||
``` |
||||
|
||||
autodock comes with a number of plugins where each piece of functionality is provided by a separate plugin. Each plugin is "linked" to autodock to receive Docker events and issue new Docker API commands. The following is a list of the currently available plugins for production use: |
||||
### Source |
||||
|
||||
- [autodock-cron](https://github.com/prologic/autodock-cron) -- Provides a Cron-like scheduler for Containers |
||||
- [autodock-logger](https://github.com/prologic/autodock-logger) -- Logs Docker Events |
||||
- [autodock-hipache](https://github.com/prologic/autodock-hipache) -- Automatically registers virtualhosts with [hipache](https://github.com/hipache/hipache) |
||||
```#!bash |
||||
$ go install github.com/prologic/autodock |
||||
``` |
||||
|
||||
Example \#1 -- Logging Docker Events |
||||
------------------------------------ |
||||
## Usage |
||||
|
||||
> **note** |
||||
> |
||||
> See: [autodock Logger plugin](http://github.com/prologic/autodock-logger) |
||||
### Docker |
||||
|
||||
Start the daemon: |
||||
```#!bash |
||||
$ docker run -d -p 8000:8000 -v /var/run/docker.sock:/var/run/docker.sock prologic/autodock |
||||
``` |
||||
|
||||
$ docker run -d -v /var/run/docker.sock:/var/run/docker.sock --name autodock:autodock prologic/autodock |
||||
### Source |
||||
|
||||
Link and start an autodock plugin: |
||||
```#!bash |
||||
$ autodock |
||||
``` |
||||
|
||||
$ docker run -i -t --link autodock prologic/autodock-logger |
||||
## License |
||||
|
||||
Now whenever you start a new container autodock will listen for Docker events. The `autodock-logger` plugin will log all Docker Events received by autodock. |
||||
|
||||
Example \#2 -- Automatic Virtual Hosting with hipache |
||||
----------------------------------------------------- |
||||
|
||||
> **note** |
||||
> |
||||
> See [autodock Hipache plugin](http://github.com/prologic/autodock-hipache) |
||||
|
||||
Start the daemon: |
||||
|
||||
$ docker run -d --name autodock prologic/autodock |
||||
|
||||
Link and start an autodock plugin: |
||||
|
||||
$ docker run -d --link autodock prologic/autodock-hipache |
||||
|
||||
Now whenever you start a new container autodock will listen for Docker events and discover containers that have been started. The `autodock-hipache` plugin will specifically listen for starting containers that have a `VIRTUALHOST` environment variable and reconfigure the running `hipache` container. |
||||
|
||||
Start a "Hello World" Web Application: |
||||
|
||||
$ docker run -d -e VIRTUALHOST=hello.local prologic/hello |
||||
|
||||
Now assuming you had `hello.local` configured in your `/etc/hosts` pointing to your `hipache` container you can now visit <http://hello.local/> |
||||
|
||||
echo "127.0.0.1 hello.local" >> /etc/hosts |
||||
curl -q -o - http://hello.local/ |
||||
Hello World! |
||||
|
||||
> **note** |
||||
> |
||||
> This method of hosting and managing webapps and websites is in production deployments and talked about in more detail in the post [A Docker-based mini-PaaS](http://shortcircuit.net.au/~prologic/blog/article/2015/03/24/a-docker-based-mini-paas/). |
||||
|
||||
Example \#3 -- Cron-like Scheduling of Containers |
||||
------------------------------------------------- |
||||
|
||||
> **note** |
||||
> |
||||
> See [autodock Cron plugin](http://github.com/prologic/autodock-cron) |
||||
|
||||
Start the daemon: |
||||
|
||||
$ docker run -d --name autodock prologic/autodock |
||||
|
||||
Link and start an autodock plugin: |
||||
|
||||
$ docker run -d --link autodock prologic/autodock-cron |
||||
|
||||
Now whenever you create a new container autodock will listen for Docker events and discover containers that have been created. The `autodock-cron` plugin will specifically listen for created containers that have a `CRON` environment variable and schedule a job based on the cron expression supplied, re-running that container whenever its schedule triggers. |
||||
|
||||
Start a "Hello" Busybox Container: |
||||
|
||||
$ docker run -i -t --name hello busybox sh -c "echo Hello" |
||||
|
||||
After about three minutes or so you should see the following in the logs: |
||||
|
||||
$ docker logs hello |
||||
Hello |
||||
Hello |
||||
Hello |
||||
MIT |
||||
|
@ -1,127 +0,0 @@ |
||||
.. _docker: http://docker.com/ |
||||
.. _dotCloud: http://dotcloud.com/ |
||||
.. _hipache: https://github.com/hipache/hipache |
||||
.. _autodock: https://github.com/prologic/autodock |
||||
.. _autodock-cron: https://github.com/prologic/autodock |
||||
.. _autodock-logger: https://github.com/prologic/autodock-logger |
||||
.. _autodock-hipache: https://github.com/prologic/autodock-hipache |
||||
|
||||
|
||||
autodock |
||||
======== |
||||
|
||||
.. image:: https://badge.imagelayers.io/prologic/autodock:latest.svg |
||||
:target: https://imagelayers.io/?images=prologic/autodock:latest |
||||
:alt: Image Layers |
||||
|
||||
`autodock`_ is a Daemon for Docker Automation. |
||||
|
||||
autodock is MIT licensed. |
||||
|
||||
Installation |
||||
------------ |
||||
|
||||
Either pull the automatically updated `Docker`_ image:: |
||||
|
||||
$ docker pull prologic/autodock |
||||
|
||||
Or install from the development repository:: |
||||
|
||||
$ git clone https://github.com/prologic/autodock.git |
||||
$ cd autodock |
||||
$ pip install -r requirements.txt |
||||
|
||||
|
||||
Plugins |
||||
------- |
||||
|
||||
autodock comes with a number of plugins where each piece of functionality is |
||||
provided by a separate plugin. Each plugin is "linked" to autodock to receive |
||||
Docker events and issue new Docker API commands. The following list is a list |
||||
of the currently available plugins for production use: |
||||
|
||||
- `autodock-cron`_ -- Provides a Cron-like scheduler for Containers |
||||
- `autodock-logger`_ -- Logs Dockers Events |
||||
- `autodock-hipache`_ -- Automatically registers virtualhosts with `hipache`_ |
||||
|
||||
|
||||
Example #1 -- Logging Docker Events |
||||
----------------------------------- |
||||
|
||||
.. note:: See: `autodock Logger plugin <http://github.com/prologic/autodock-logger>`_ |
||||
|
||||
Start the daemon:: |
||||
|
||||
$ docker run -d -v /var/run/docker.sock:/var/run/docker.sock --name autodock:autodock prologic/autodock |
||||
|
||||
Link and start an autodock plugin:: |
||||
|
||||
$ docker run -i -t --link autodock prologic/autodock-logger |
||||
|
||||
Now whenever you start a new container autodock will listen for Docker events. |
||||
The ``autodock-logger`` plugin will log all Docker Events received by autodock. |
||||
|
||||
|
||||
Example #2 -- Automatic Virtual Hosting with hipache |
||||
---------------------------------------------------- |
||||
|
||||
.. note:: See `autodock Hipache plugin <http://github.com/prologic/autodock-hipache>`_ |
||||
|
||||
Start the daemon:: |
||||
|
||||
$ docker run -d --name autodock prologic/autodock |
||||
|
||||
Link and start an autodock plugin:: |
||||
|
||||
$ docker run -d --link autodock prologic/autodock-hipache |
||||
|
||||
Now whenever you start a new container autodock will listen for Docker events |
||||
and discover containers that have been started. The ``autodock-hipache`` plugin |
||||
will specifically listen for starting containers that have a ``VIRTUALHOST`` |
||||
environment variable and reconfigure the running ``hipache`` container. |
||||
|
||||
Start a "Hello World" Web Application:: |
||||
|
||||
$ docker run -d -e VIRTUALHOST=hello.local prologic/hello |
||||
|
||||
Now assuming you had ``hello.local`` configured in your ``/etc/hosts`` |
||||
pointing to your ``hipache`` container you can now visit http://hello.local/ |
||||
|
||||
:: |
||||
|
||||
echo "127.0.0.1 hello.local" >> /etc/hosts |
||||
curl -q -o - http://hello.local/ |
||||
Hello World! |
||||
|
||||
.. note:: This method of hosting and managing webapps and websites is in production deployments and talked about in more detail in the post `A Docker-based mini-PaaS <http://shortcircuit.net.au/~prologic/blog/article/2015/03/24/a-docker-based-mini-paas/>`_. |
||||
|
||||
|
||||
Example #3 -- Cron-like Scheduling of Containers |
||||
------------------------------------------------ |
||||
|
||||
.. note:: See `autodock Cron plugin <http://github.com/prologic/autodock-cron>`_ |
||||
|
||||
Start the daemon:: |
||||
|
||||
$ docker run -d --name autodock prologic/autodock |
||||
|
||||
Link and start an autodock plugin:: |
||||
|
||||
$ docker run -d --link autodock prologic/autodock-cron |
||||
|
||||
Now whenever you create a new container autodock will listen for Docker events |
||||
and discover containers that have been created. The ``autodock-cron`` plugin |
||||
will specifically listen for created containers that have a ``CRON`` |
||||
environment variable and schedule a job based on the cron expression supplied |
||||
and re-run that container when its scheduled has triggered. |
||||
|
||||
Start a "Hello" Busybox Container:: |
||||
|
||||
$ docker run -i -t --name hello busybox sh -c "echo Hello" |
||||
|
||||
After about three minutes or so you should see the following in the logs:: |
||||
|
||||
$ docker logs hello |
||||
Hello |
||||
Hello |
||||
Hello |
@ -1,18 +0,0 @@ |
||||
# Package: autodock |
||||
# Date: 15th November 2014 |
||||
# Author: James Mills, prologic at shortcircuit dot net dot au |
||||
|
||||
|
||||
"""autodock - Daemon for Docker Automation |
||||
|
||||
autodock is a Daemon for Automating Docker. |
||||
|
||||
:copyright: CopyRight (C) 2014 by James Mills |
||||
""" |
||||
|
||||
|
||||
__author__ = "James Mills, prologic at shortcircuit dot net dot au" |
||||
__date__ = "15th November 2014" |
||||
|
||||
|
||||
from .version import version as __version__ # noqa |
@ -1,65 +0,0 @@ |
||||
# Module: codecs |
||||
# Date: 15th November 2014 |
||||
# Author: James Mills, prologic at shortcircuit dot net dot au |
||||
|
||||
|
||||
"""Codecs""" |
||||
|
||||
|
||||
import json |
||||
from functools import partial |
||||
|
||||
|
||||
from circuits import Event |
||||
from circuits.six import bytes_to_str, text_type |
||||
|
||||
|
||||
class JSONEncoder(json.JSONEncoder): |
||||
|
||||
def default(self, obj): |
||||
if isinstance(obj, Event): |
||||
return { |
||||
"name": obj.name, |
||||
"args": obj.args, |
||||
"kwargs": obj.kwargs, |
||||
"success": obj.success, |
||||
"failure": obj.failure, |
||||
"channels": obj.channels, |
||||
"notify": obj.notify |
||||
} |
||||
|
||||
# Let the base class default method raise the TypeError |
||||
return json.JSONEncoder.default(self, obj) |
||||
|
||||
|
||||
class JSONDecoder(json.JSONDecoder): |
||||
|
||||
def decode(self, data): |
||||
obj = json.loads(data) |
||||
|
||||
name = bytes_to_str(obj["name"].encode("utf-8")) |
||||
|
||||
args = [] |
||||
for arg in obj["args"]: |
||||
if isinstance(arg, text_type): |
||||
arg = arg.encode("utf-8") |
||||
args.append(arg) |
||||
|
||||
kwargs = {} |
||||
for k, v in obj["kwargs"].items(): |
||||
if isinstance(v, text_type): |
||||
v = v.encode("utf-8") |
||||
kwargs[str(k)] = v |
||||
|
||||
e = Event.create(name, *args, **kwargs) |
||||
|
||||
e.success = bool(obj["success"]) |
||||
e.failure = bool(obj["failure"]) |
||||
e.notify = bool(obj["notify"]) |
||||
e.channels = tuple(obj["channels"]) |
||||
|
||||
return e |
||||
|
||||
|
||||
dumps = partial(json.dumps, cls=JSONEncoder) |
||||
loads = partial(json.loads, cls=JSONDecoder) |
@ -1,102 +0,0 @@ |
||||
# Module: events |
||||
# Date: 15th November 2014 |
||||
# Author: James Mills, prologic at shortcircuit dot net dot au |
||||
|
||||
|
||||
"""Events""" |
||||
|
||||
|
||||
from circuits import Event |
||||
|
||||
|
||||
class docker_event(Event): |
||||
"""Docker Event""" |
||||
|
||||
|
||||
class container_attached(docker_event): |
||||
"""Container Attached Event""" |
||||
|
||||
|
||||
class container_committed(docker_event): |
||||
"""Container Commited Event""" |
||||
|
||||
|
||||
class container_created(docker_event): |
||||
"""Container Created Event""" |
||||
|
||||
|
||||
class container_destroyed(docker_event): |
||||
"""Container Destroyed Event""" |
||||
|
||||
|
||||
class container_started(docker_event): |
||||
"""Container Started Event""" |
||||
|
||||
|
||||
class container_stopped(docker_event): |
||||
"""Container Stopped Event""" |
||||
|
||||
|
||||
class container_killed(docker_event): |
||||
"""Container Killed Event""" |
||||
|
||||
|
||||
class container_died(docker_event): |
||||
"""Container Died Event""" |
||||
|
||||
|
||||
class container_exported(docker_event): |
||||
"""Container Exported Event""" |
||||
|
||||
|
||||
class container_paused(docker_event): |
||||
"""Container Paused Event""" |
||||
|
||||
|
||||
class container_renamed(docker_event): |
||||
"""Container Renamed Event""" |
||||
|
||||
|
||||
class container_resized(docker_event): |
||||
"""Container Resized Event""" |
||||
|
||||
|
||||
class container_restarted(docker_event): |
||||
"""Container Restarted Event""" |
||||
|
||||
|
||||
class container_unpaused(docker_event): |
||||
"""Container Unpaused Event""" |
||||
|
||||
|
||||
class image_untagged(docker_event): |
||||
"""Image Untagged Event""" |
||||
|
||||
|
||||
class image_deleted(docker_event): |
||||
"""Image Delete Event""" |
||||
|
||||
|
||||
class pull(docker_event): |
||||
"""Pull Event""" |
||||
|
||||
|
||||
DOCKER_EVENTS = { |
||||
u"attach": container_attached, |
||||
u"commit": container_committed, |
||||
u"create": container_created, |
||||
u"destroy": container_destroyed, |
||||
u"start": container_started, |
||||
u"stop": container_stopped, |
||||
u"kill": container_killed, |
||||
u"die": container_died, |
||||
u"export": container_exported, |
||||
u"pause": container_paused, |
||||
u"rename": container_renamed, |
||||
u"resize": container_resized, |
||||
u"restart": container_restarted, |
||||
u"unpause": container_unpaused, |
||||
u"untag": image_untagged, |
||||
u"delete": image_deleted, |
||||
u"pull": pull, |
||||
} |
@ -1,152 +0,0 @@ |
||||
#!/usr/bin/env python |
||||
# Module: main |
||||
# Date: 15th November 2014 |
||||
# Author: James Mills, prologic at shortcircuit dot net dot au |
||||
|
||||
|
||||
"""Daemon for Docker Automation""" |
||||
|
||||
|
||||
from __future__ import print_function |
||||
|
||||
|
||||
import sys |
||||
from time import time |
||||
from os import environ |
||||
from json import loads |
||||
from threading import Thread |
||||
from traceback import format_exc |
||||
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser |
||||
|
||||
|
||||
from docker import Client |
||||
from circuits.web import Server, JSONRPC |
||||
from circuits import handler, Component, Debugger |
||||
|
||||
|
||||
from .node import Node |
||||
from .utils import parse_bind |
||||
from .events import docker_event, DOCKER_EVENTS |
||||
|
||||
|
||||
class DockerRPCServer(Component): |
||||
|
||||
channel = "rpc" |
||||
|
||||
def init(self, bind, url, channel=channel): |
||||
self.client = Client(url) |
||||
|
||||
Server(bind).register(self) |
||||
JSONRPC(rpc_channel=self.channel).register(self) |
||||
|
||||
def ping(self, ts): |
||||
return time() - ts |
||||
|
||||
def docker(self, method, *args, **kwargs): |
||||
# TODO: Make this async |
||||
try: |
||||
return getattr(self.client, method)(*args, **kwargs) |
||||
except Exception as e: |
||||
return {"error": str(e), "traceback": format_exc()} |
||||
|
||||
|
||||
class DockerEventManager(Thread): |
||||
|
||||
def __init__(self, manager, url=None): |
||||
super(DockerEventManager, self).__init__() |
||||
|
||||
self.manager = manager |
||||
self.url = url |
||||
|
||||
self.daemon = True |
||||
|
||||
self.client = Client(self.url) |
||||
|
||||
def run(self): |
||||
for payload in self.client.events(): |
||||
event = loads(payload) |
||||
try: |
||||
status = event.pop("status") |
||||
docker_event = DOCKER_EVENTS.get(status) |
||||
if docker_event is not None: |
||||
self.manager.fire(docker_event(**event), "docker") |
||||
else: |
||||
print( |
||||
"WARNING: Unknown Docker Event <{0:s}({1:s})>".format( |
||||
status, repr(event) |
||||
), |
||||
file=sys.stderr |
||||
) |
||||
except Exception as e: |
||||
print( |
||||
"WARNING: Unknown payload {}".format(repr(event)), |
||||
file=sys.stderr, |
||||
) |
||||
|
||||
def stop(self): |
||||
self.client.close() |
||||
|
||||
|
||||
class EventBroadcaster(Component): |
||||
|
||||
def init(self, host="127.0.0.1", port=1338): |
||||
self.host = host |
||||
self.port = port |
||||
|
||||
self.node = Node(self.host, self.port).register(self) |
||||
|
||||
@handler("*", channel="docker") |
||||
def broadcast_docker_event(self, event, *args, **kwargs): |
||||
if isinstance(event, docker_event): |
||||
self.node.broadcast(event) |
||||
|
||||
|
||||
class App(Component): |
||||
|
||||
def init(self, args): |
||||
if args.debug: |
||||
Debugger().register(self) |
||||
|
||||
bind = parse_bind(args.bind) |
||||
|
||||
DockerRPCServer(bind, args.url).register(self) |
||||
DockerEventManager(self, args.url).start() |
||||
EventBroadcaster(*bind).register(self) |
||||
|
||||
def signal(self, *args): |
||||
raise SystemExit(0) |
||||
|
||||
|
||||
def parse_args(): |
||||
parser = ArgumentParser( |
||||
description=__doc__, |
||||
formatter_class=ArgumentDefaultsHelpFormatter |
||||
) |
||||
|
||||
parser.add_argument( |
||||
"-b", "--bind", action="store", dest="bind", metavar="INT", type=str, |
||||
default=environ.get("BIND", "0.0.0.0:1338"), |
||||
help="Interface and Port to Bind to" |
||||
) |
||||
|
||||
parser.add_argument( |
||||
"-d", "--debug", action="store_true", dest="debug", |
||||
default=environ.get("DEBUG", False), |
||||
help="Enable Debug Mode" |
||||
) |
||||
|
||||
parser.add_argument( |
||||
"-u", "--url", action="store", dest="url", metavar="URL", type=str, |
||||
default=environ.get("URL", None), |
||||
help="Docker Daemon URL" |
||||
) |
||||
|
||||
return parser.parse_args() |
||||
|
||||
|
||||
def main(): |
||||
App(parse_args()).run() |
||||
|
||||
|
||||
if __name__ == "__main__": |
||||
main() |
@ -1,84 +0,0 @@ |
||||
# Module: node |
||||
# Date: 20th March 2014 |
||||
# Author: James Mills, prologic at shortcircuit dot net dot au |
||||
|
||||
|
||||
"""Peer to Peer Node Communcations |
||||
|
||||
This module aims to build enough essential functionality for |
||||
an application to employ distributed communications. |
||||
|
||||
Default Port:: |
||||
>>> s = "circuits.node" |
||||
>>> xs = map(ord, s) |
||||
>>> sum(xs) |
||||
1338 |
||||
|
||||
TODO: |
||||
[ ] Support UDP and TCP transports |
||||
[ ] Support Websockets |
||||
[ ] Support Web API |
||||
[ ] PUT /event |
||||
[ ] GET /event |
||||
[ ] Support Serializations: |
||||
[ ] json |
||||
[ ] msgpack |
||||
""" |
||||
|
||||
|
||||
from __future__ import print_function |
||||
|
||||
|
||||
import sys |
||||
from traceback import format_exc |
||||
|
||||
|
||||
from circuits.net.events import write |
||||
from circuits.net.sockets import UDPServer |
||||
from circuits import handler, BaseComponent, Event |
||||
|
||||
|
||||
from .codecs import dumps, loads |
||||
|
||||
|
||||
class hello(Event): |
||||
"""hello Event""" |
||||
|
||||
|
||||
class Node(BaseComponent): |
||||
|
||||
channel = "node" |
||||
|
||||
def __init__(self, host="0.0.0.0", port=1338, channel=channel): |
||||
super(Node, self).__init__(channel=channel) |
||||
|
||||
self.host = host |
||||
self.port = port |
||||
|
||||
# Peers we keep track of |
||||
self.peers = set() |
||||
|
||||
UDPServer((self.host, self.port), channel=self.channel).register(self) |
||||
|
||||
def broadcast(self, event): |
||||
for peer in self.peers: |
||||
self.send(event, peer) |
||||
|
||||
def send(self, event, peer): |
||||
data = dumps(event) |
||||
self.fire(write(peer, data)) |
||||
|
||||
@handler("read") |
||||
def _process_message(self, peer, data): |
||||
# Event Packet |
||||
try: |
||||
self.peers.add(peer) |
||||
event = loads(data) |
||||
self.fire(event, *event.channels) |
||||
except Exception as e: |
||||
print( |
||||
"ERROR: Could not parse packet.\n" |
||||
"Error: {} Data: {}".format(e, data), |
||||
file=sys.stderr |
||||
) |
||||
print(format_exc(), file=sys.stderr) |
@ -1,151 +0,0 @@ |
||||
# Module: plugin |
||||
# Date: 15th November 2014 |
||||
# Author: James Mills, prologic at shortcircuit dot net dot au |
||||
|
||||
|
||||
"""Plugin |
||||
|
||||
Subclass :class:`Plugin` to create autodock plugins with standarized CLI Options and API. |
||||
""" |
||||
|
||||
|
||||
from __future__ import print_function |
||||
|
||||
|
||||
from os import environ |
||||
from inspect import getmodule |
||||
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser |
||||
|
||||
|
||||
from circuits import Component, Debugger |
||||
from jsonrpc_requests import Server as RPCServer |
||||
|
||||
|
||||
from .node import hello, Node |
||||
from .utils import getenv, parse_bind |
||||
|
||||
|
||||
def parse_args(parse=True, description=None): |
||||
parser = ArgumentParser( |
||||
description=(description or ""), |
||||
formatter_class=ArgumentDefaultsHelpFormatter |
||||
) |
||||
|
||||
parser.add_argument( |
||||
"-b", "--bind", action="store", dest="bind", metavar="INT", type=str, |
||||
default=environ.get("BIND", "0.0.0.0:1338"), |
||||
help="Interface and Port to Bind to" |
||||
) |
||||
|
||||
parser.add_argument( |
||||
"-d", "--debug", action="store_true", dest="debug", |
||||
default=environ.get("DEBUG", False), |
||||
help="Enable Debug Mode" |
||||
) |
||||
|
||||
parser.add_argument( |
||||
"-u", "--url", action="store", dest="url", metavar="URL", type=str, |
||||
default=getenv("URL", "AUTODOCK_PORT", default="udp://autodock:1338"), |
||||
help="autodock Daemon URL" |
||||
) |
||||
|
||||
return parser.parse_args() if parse else parser |
||||
|
||||
|
||||
class Plugin(Component): |
||||
|
||||
def init(self, parse_args_cb=None): |
||||
# Get description from the first line of the plugin's __doc__ |
||||
description = getattr(getmodule(self), "__doc__", "") |
||||
|
||||
# Allow ArgumentsParser to be extended. |
||||
if parse_args_cb is not None: |
||||
self.args = parse_args_cb(parse_args(False, description)).parse_args() |
||||
else: |
||||
self.args = parse_args(description=description) |
||||
|
||||
self.bind = parse_bind(self.args.bind) |
||||
self.url = parse_bind(self.args.url) |
||||
|
||||
self.rpc = RPCServer("http://{}:{}".format(*self.url)) |
||||
|
||||
def started(self, *args): |
||||
if self.args.debug: |
||||
Debugger().register(self) |
||||
|
||||
self.node = Node(*self.bind).register(self) |
||||
|
||||
def ready(self, *args): |
||||
self.node.send(hello(), self.url) |
||||
|
||||
def container_created(self, event, **data): |
||||
"""Container created Event |
||||
|
||||
Override this in a subclass to receiver the created event |
||||
""" |
||||
|
||||
def container_destroyed(self, event, **data): |
||||
"""Container destroyed Event |
||||
|
||||
Override this in a subclass to receiver the destroyed event |
||||
""" |
||||
|
||||
def container_started(self, event, **data): |
||||
"""Container started Event |
||||
|
||||
Override this in a subclass to receiver the started event |
||||
""" |
||||
|
||||
def container_stopped(self, event, **data): |
||||
"""Container stopped Event |
||||
|
||||
Override this in a subclass to receiver the stopped event |
||||
""" |
||||
|
||||
def container_killed(self, event, **data): |
||||
"""Container killed Event |
||||
|
||||
Override this in a subclass to receiver the killed event |
||||
""" |
||||
|
||||
def container_died(self, event, **data): |
||||
"""Container died Event |
||||
|
||||
Override this in a subclass to receiver the died event |
||||
""" |
||||
|
||||
def container_exported(self, event, **data): |
||||
"""Container exported Event |
||||
|
||||
Override this in a subclass to receiver the exported event |
||||
""" |
||||
|
||||
def container_paused(self, event, **data): |
||||
"""Container paused Event |
||||
|
||||
Override this in a subclass to receiver the paused event |
||||
""" |
||||
|
||||
def container_restarted(self, event, **data): |
||||
"""Container restarted Event |
||||
|
||||
Override this in a subclass to receiver the restarted event |
||||
""" |
||||
|
||||
def container_unpaused(self, event, **data): |
||||
"""Container unpaused Event |
||||
|
||||
Override this in a subclass to receiver the unpaused event |
||||
""" |
||||
|
||||
def image_untagged(self, event, **data): |
||||
"""Image untagged Event |
||||
|
||||
Override this in a subclass to receiver the untagged event |
||||
""" |
||||
|
||||
def image_deleted(self, event, **data): |
||||
"""Image deleted Event |
||||
|
||||
Override this in a subclass to receiver the untagged event |
||||
""" |
@ -1,47 +0,0 @@ |
||||
# Module: utils |
||||
# Date: 15th November 2014 |
||||
# Author: James Mills, prologic at shortcircuit dot net dot au |
||||
|
||||
|
||||
"""Utilities""" |
||||
|
||||
|
||||
from os import environ |
||||
from time import sleep |
||||
from functools import partial |
||||
from socket import AF_INET, SOCK_STREAM, socket |
||||
|
||||
|
||||
def anyof(obj, *types):
    """Return True if ``obj`` is an instance of any of ``types``."""
    for kind in types:
        if isinstance(obj, kind):
            return True
    return False
||||
|
||||
|
||||
def getenv(*keys, **kwargs):
    """Return the value of the first environment variable in ``keys``
    that is set.

    Falls back to ``kwargs["default"]`` when none of the keys is set.

    :raises KeyError: when no key is set and no default is given
       (including when called with no keys at all).
    """
    for key in keys:
        if key in environ:
            return environ[key]
    if "default" in kwargs:
        return kwargs["default"]
    # Previously this raised ``KeyError(key)``, which blew up with a
    # NameError when called with no keys (``key`` was never bound).
    raise KeyError(keys[-1] if keys else "getenv() called with no keys")
||||
|
||||
|
||||
def parse_bind(s, default_port=1338):
    """Split a bind string into an ``(address, port)`` tuple.

    Accepts ``[protocol://]address[:port]``; any protocol prefix is
    stripped and otherwise ignored (XXX: as in the original), and
    ``default_port`` is used when no port is given.
    """
    # Drop an optional "protocol://" prefix; the scheme itself is unused.
    _, sep, rest = s.partition("://")
    if sep:
        s = rest

    # Split on the first ":" only; everything after it must be numeric.
    head, sep, tail = s.partition(":")
    if sep:
        return head, int(tail)
    return s, default_port
||||
|
||||
|
||||
def waitfor(address, port, timeout=10):
    # Poll a TCP endpoint once per second until it accepts connections
    # or roughly ``timeout`` seconds have elapsed.
    #
    # NOTE(review): the socket is never closed and nothing is returned,
    # so callers cannot distinguish success from timeout -- confirm this
    # is intended before reusing.
    sock = socket(AF_INET, SOCK_STREAM)
    counter = timeout
    # connect_ex() returns 0 on a successful connection.
    while not sock.connect_ex((address, port)) == 0 and counter:
        sleep(1)
        counter -= 1
@ -1,15 +0,0 @@ |
||||
# Package: version |
||||
# Date: 15th November 2014 |
||||
# Author: James Mills, j dot mills at griffith dot edu dot au |
||||
|
||||
"""Version Module |
||||
|
||||
So we only have to maintain version information in one place! |
||||
""" |
||||
|
||||
# Single source of truth for the package version.
version_info = (0, 0, 2)  # (major, minor, patch, dev?)

# Development builds are rendered as the literal string "dev";
# releases join the numeric components with dots.
version = "dev" if version_info[-1] == "dev" else ".".join(map(str, version_info))
@ -0,0 +1,124 @@ |
||||
package client |
||||
|
||||
import ( |
||||
"crypto/tls" |
||||
"crypto/x509" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"net/http" |
||||
"os" |
||||
"path/filepath" |
||||
|
||||
"github.com/docker/docker/client" |
||||
"github.com/prologic/autodock/version" |
||||
log "github.com/sirupsen/logrus" |
||||
) |
||||
|
||||
const (
	// apiVersion is the Docker Engine API version requested from the daemon.
	apiVersion = "1.30"

	// defaultDockerURL is the endpoint used when neither the -dockerurl
	// flag nor the DOCKER_HOST environment variable supplies one.
	defaultDockerURL = "unix:///var/run/docker.sock"
)
||||
|
||||
// NewTLSConfig builds a *tls.Config from PEM-encoded CA, certificate and
// key material. The CA becomes the only trusted root and the cert/key
// pair is presented as the client certificate. Server certificate
// verification is skipped only when allowInsecure is true.
//
// On a bad cert/key pair the partially-populated config is returned
// together with the error (callers rely on a non-nil config).
func NewTLSConfig(caCert, cert, key []byte, allowInsecure bool) (*tls.Config, error) {
	var tlsConfig tls.Config

	// Only skip verification when explicitly requested. The previous
	// implementation set InsecureSkipVerify to true unconditionally,
	// which disabled verification even for allowInsecure == false.
	tlsConfig.InsecureSkipVerify = allowInsecure

	certPool := x509.NewCertPool()
	// NOTE(review): the return value of AppendCertsFromPEM is ignored,
	// so a malformed CA silently yields an empty root pool.
	certPool.AppendCertsFromPEM(caCert)
	tlsConfig.RootCAs = certPool

	keypair, err := tls.X509KeyPair(cert, key)
	if err != nil {
		return &tlsConfig, err
	}
	tlsConfig.Certificates = []tls.Certificate{keypair}

	return &tlsConfig, nil
}
||||
|
||||
// GetDockerURL ...
|
||||
func GetDockerURL(dockerURL string) string { |
||||
// check environment for docker client config
|
||||
envDockerHost := os.Getenv("DOCKER_HOST") |
||||
if dockerURL == "" && envDockerHost != "" { |
||||
dockerURL = envDockerHost |
||||
} |
||||
|
||||
if dockerURL == "" { |
||||
dockerURL = defaultDockerURL |
||||
} |
||||
|
||||
return dockerURL |
||||
} |
||||
|
||||
// GetDockerTLSConfig ...
|
||||
func GetDockerTLSConfig(tlsCaCert, tlsCert, tlsKey string, allowInsecure bool) *tls.Config { |
||||
envDockerCertPath := os.Getenv("DOCKER_CERT_PATH") |
||||
envDockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") |
||||
if tlsCaCert == "" && envDockerCertPath != "" && envDockerTLSVerify != "" { |
||||
tlsCaCert = filepath.Join(envDockerCertPath, "ca.pem") |
||||
tlsCert = filepath.Join(envDockerCertPath, "cert.pem") |
||||
tlsKey = filepath.Join(envDockerCertPath, "key.pem") |
||||
} |
||||
|
||||
if tlsCaCert != "" && tlsCert != "" && tlsKey != "" { |
||||
log.Debug("using tls for communication with docker") |
||||
caCert, err := ioutil.ReadFile(tlsCaCert) |
||||
if err != nil { |
||||
log.Fatalf("error loading tls ca cert: %s", err) |
||||
} |
||||
|
||||
cert, err := ioutil.ReadFile(tlsCert) |
||||
if err != nil { |
||||
log.Fatalf("error loading tls cert: %s", err) |
||||
} |
||||
|
||||
key, err := ioutil.ReadFile(tlsKey) |
||||
if err != nil { |
||||
log.Fatalf("error loading tls key: %s", err) |
||||
} |
||||
|
||||
cfg, err := NewTLSConfig(caCert, cert, key, allowInsecure) |
||||
if err != nil { |
||||
log.Fatalf("error configuring tls: %s", err) |
||||
} |
||||
cfg.InsecureSkipVerify = envDockerTLSVerify == "" |
||||
return cfg |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// GetDockerClient ...
|
||||
func GetDockerClient(dockerURL, tlsCaCert, tlsCert, tlsKey string, allowInsecure bool) (*client.Client, error) { |
||||
dockerURL = GetDockerURL(dockerURL) |
||||
|
||||
var httpClient *http.Client |
||||
|
||||
// load tlsconfig
|
||||
tlsConfig := GetDockerTLSConfig(tlsCaCert, tlsCert, tlsKey, allowInsecure) |
||||
|
||||
if tlsConfig != nil { |
||||
httpClient = &http.Client{ |
||||
Transport: &http.Transport{ |
||||
TLSClientConfig: tlsConfig, |
||||
}, |
||||
} |
||||
} |
||||
|
||||
defaultHeaders := map[string]string{ |
||||
"User-Agent": fmt.Sprintf("autodock-%s", version.Version), |
||||
} |
||||
c, err := client.NewClient( |
||||
dockerURL, apiVersion, httpClient, defaultHeaders, |
||||
) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return c, nil |
||||
} |
@ -0,0 +1,94 @@ |
||||
package client |
||||
|
||||
import ( |
||||
"testing" |
||||
) |
||||
|
||||
// Static TLS fixtures for the tests below: a self-signed CA certificate,
// a certificate issued under it, and the matching RSA private key.
// These are test-only credentials (O=interlock) and must never be used
// outside this test file. Do not edit the PEM bodies.
const (
	// TestCACert is the PEM-encoded test CA certificate.
	TestCACert = `-----BEGIN CERTIFICATE-----
MIIC0jCCAbygAwIBAgIRAKmQU1XP3XW8ONTT7HQ9x+gwCwYJKoZIhvcNAQELMBQx
EjAQBgNVBAoTCWludGVybG9jazAeFw0xNjAyMjYyMjU0MDBaFw0xOTAyMTAyMjU0
MDBaMBQxEjAQBgNVBAoTCWludGVybG9jazCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBALlWkKybRVQT3eRuuG+XuSz68dFZ8PN/NRIhJV8+nlwG2vu3Dy9S
hjGBPYyoMaIBYOahHuGhNMQuMgkforobeyJL2XUc31kgL3Beb7q4CqpUiWcwmI1w
zxw4Nfc1u1cORLo8nJvFIZ7V2qqJJp0bv/uZlb6Liuf8vtykmA7Qmr3Nixod71jY
jkZIxqz9U7q5bFBDxUw4oEb+UFA4kUdf71N+1cyn5IXN2QrFftpl31s5Xz0yrQlv
FUzEBJOof9jAF3ntHAus7IVoL9DrpKwmJ1w6zhilItku42jNZRPYDLQcf/wugkZS
JREN7x8VSz6hTpeUd5KlWvHB4ng+OCfdBQ8CAwEAAaMjMCEwDgYDVR0PAQH/BAQD
AgCsMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4IBAQAeLsLVHWBcfRUO
6zMOGR2yv3L40RMmwGTyAh/9VMvkx9zSFvEa7ilrsD6In5HLTCVtIc//9/UaCgA2
xmnAWDiWbNcAHRn2ZOOvXkqVS7u7kdvSL+IGqEwQxdxx/WPkMpFBUKxsEaYGHmQc
L6cVDa7wg71DhpUbYelE/0x/28UTkswV4sLanXPJGlX4ZYvJ/POHrFlqZeJaYioP
TLOzQuZya72rhXmsElY0SlSVWBFaDo2+wJ688tfAAc3T+3GqZh17ArNpSUB/rsNE
TnEEMq7EDu3adFkocNW2L4Jp3Ny0oSBVoTX3NwF/fnld8qsNbW6qICCxTHJQyKRH
KyxQJWIx
-----END CERTIFICATE-----`

	// TestCert is a PEM-encoded certificate issued by TestCACert.
	TestCert = `-----BEGIN CERTIFICATE-----
MIIC7jCCAdigAwIBAgIRAMfciQv+IxRI4to5sDqUZpcwCwYJKoZIhvcNAQELMBQx
EjAQBgNVBAoTCWludGVybG9jazAeFw0xNjAyMjYyMjU0MDBaFw0xOTAyMTAyMjU0
MDBaMBQxEjAQBgNVBAoTCWludGVybG9jazCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBAMJ1gkztg+FFdEMrp0KlCe7nyFfEycYxW1O5PMmqe3o1SijQM4qi
/NHUQfLYWiFBWXRkVTCuqPQsp5lZqhmD/Tzd5EjrvzyORrOZ2lVJLLnt+omoAsVh
V/lioUdKWQD5Mw1mFg6l/kjil//4VfWd3P0SdDHOqtriKQruwu07cdL+MuQ+/xjA
SKGh6FWAUlb5FZHZxi9WubUc4jOKAbsA/WAKvRDDNVc93oy0925ummeOv5DsF2wn
X+ptNrFVFqP6pMteSSyEQFZROFkLjp5VfWoc2b9WeIrHs2OU/fM7knAkAwm3+KWA
GWiD3J9GVpG+vLnhReBj0heEGV8mg43/IlECAwEAAaM/MD0wDgYDVR0PAQH/BAQD
AgCoMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAMBgNVHRMBAf8EAjAA
MAsGCSqGSIb3DQEBCwOCAQEAlJWZO0HVrUHiRzI4PsBtq2yABLoqt4/c1V0FxkSL
EpsQPVSkpi/infvYojXo8XumivOKXCgs/4WwZoXYyhI81zppwCfy6CRYwJ8GvVpW
Gkj2iuw7VHYf9QPkQfmXAidnfChJwt7OSqkBVPS9KSM5zjWqwQUfZEQvnRbm+qgG
frYX3ikJm/Xs7heIjZyJdqV3o5eHcGpA6oK9aSpA283brrmen4cxUkW8Mt2KRccC
1l6ZZUEm73CMgWBYxnPZhIzTbbLTpMBGtdF0bJ16aEvD+92WSambRuJHhPOPpD9k
oUmZainyr6eRxb6bzE7qf5YfB+3uFZrPsEmH/rmZiNS3Cw==
-----END CERTIFICATE-----`

	// TestKey is the PEM-encoded RSA private key matching TestCert.
	TestKey = `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAwnWCTO2D4UV0QyunQqUJ7ufIV8TJxjFbU7k8yap7ejVKKNAz
iqL80dRB8thaIUFZdGRVMK6o9CynmVmqGYP9PN3kSOu/PI5Gs5naVUksue36iagC
xWFX+WKhR0pZAPkzDWYWDqX+SOKX//hV9Z3c/RJ0Mc6q2uIpCu7C7Ttx0v4y5D7/
GMBIoaHoVYBSVvkVkdnGL1a5tRziM4oBuwD9YAq9EMM1Vz3ejLT3bm6aZ46/kOwX
bCdf6m02sVUWo/qky15JLIRAVlE4WQuOnlV9ahzZv1Z4isezY5T98zuScCQDCbf4
pYAZaIPcn0ZWkb68ueFF4GPSF4QZXyaDjf8iUQIDAQABAoIBADOhscQtOFwC7fi7
yYBXg8isQDSVqqF2D3Kud2ZwXrK3HYayqUzBM/GesxgAvAWibVcLINd5OKEEjkeY
WCLIOeAEZo26Ep/IgxtC2YbVlAuWFXShaILx8sLjnkDoi7NHd3eySF8BUgAWMhej
32cE0F1dnf5vikvtysn9VUJaC0HjCDJXQaRj/D9y99rJ2SkW77ahymBy1QSAXEm3
Hg3q79vAqPczski7rRcoeKrnFiD0Y+4ho86NHyKFE/3O/e7GjpTWPdSBeNRcIMoI
xaqpmsMZp0s7Nzdk/2Qd8GmVYwncw/4WDebVIvmUlYQNcySRo2l6Flml2zwedviy
/0JFWRUCgYEA342mqQQstGlB1IM0T43x+BJ4AjOn0vGDPUrEvwu5zOT3IWggIIxS
RJYlcUIazN0KNmziY613v+GWPTC8NnWjQF3f/9++GVatAptzFyTk1IAKBX7oPu/d
JJFtwgtr66v5NqiGQyIsWuX19DLy32E5TVmZWKx9Tgg0xH71z3YXgbMCgYEA3q7U
a2Nkn86FTvsvdL5nigwuXMa/QQujZrTnzqMzJ7tYRauZXUG0Ns7cJU4fvMTfLUIG
4Nz2xqiEume0flnaBx6kAySe/9LUdmHBmvPtMF7GsJPHtdZZSDPdOdLOPqHrxnAz
E9E5vI62d4e4TCC5Vqxc8AQ+80B+8AYh528BIesCgYEAxf5zKSalYXQH9evunLcf
I5NX7rtJXC7DCbn63ynHeY0gw9mw+qLNCinhJ5pgmij7LpDpQVcVxEBMDA3p5GH0
IMID7l9/wnld6f07xbfLY9mzBoMLtxJCTmzvRPlQr/40TxCbOUI+/pLFb27gZK97
TOKaSksa/82MzquTkhcJYrkCgYEAzI/2eyA3U8a4F7IQCkLPgrVl8bxx/SLf3H3b
ZKvvVlR35qiYnl65Wo/1FCAMb7C7BCxffTn/SMeOBl82I8wOyfOP34NIvOHEY2uy
GtJx1bl69MMM9zINmpJqa7AH3umIWibABThyvZCsdmmrF+QH3mNAjQyZd4SMF5rK
knwaojkCgYAkhVol4h1dLhMaY/K6ttAAg/d6RuBVovsKnbXFzxUgHYUWm6pjWryK
iaKXFTXXkeGoX+NuQwL98CmLgFOgWo3P90r0SDsN9xqi4+MLiY9yzHAa+I5Qzt/k
eT0pvpz6WapVa/sXPmjZF2N/3AvCUGvrwCiOMV+yeYReJIRC0jVcSQ==
-----END RSA PRIVATE KEY-----`
)
||||
|
||||
func TestGetTLSConfig(t *testing.T) { |
||||
cfg, err := GetTLSConfig( |
||||
[]byte(TestCACert), |
||||
[]byte(TestCert), |
||||
[]byte(TestKey), |
||||
true, |
||||
) |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if cfg == nil { |
||||
t.Fatal("unexpected nil TLS config") |
||||
} |
||||
|
||||
if len(cfg.Certificates) != 1 { |
||||
t.Fatal("expected certificate in TLS config") |
||||
} |
||||
} |
@ -0,0 +1 @@ |
||||
autodock |
@ -0,0 +1,92 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
"os" |
||||
|
||||
log "github.com/sirupsen/logrus" |
||||
|
||||
pkgver "github.com/prologic/autodock/version" |
||||
|
||||
"github.com/namsral/flag" |
||||
"github.com/prologic/autodock/config" |
||||
"github.com/prologic/autodock/server" |
||||
) |
||||
|
||||
func main() { |
||||
var ( |
||||
dockerurl string |
||||
|
||||
tlsverify bool |
||||
tlscacert string |
||||
tlscert string |
||||
tlskey string |
||||
tls bool |
||||
|
||||
debug bool |
||||
version bool |
||||
|
||||
bind string |
||||
) |
||||
|
||||
flag.String(flag.DefaultConfigFlagname, "", "path to config file") |
||||
flag.BoolVar(&debug, "debug", false, "enable debug logging") |
||||
flag.BoolVar(&version, "v", false, "display version information") |
||||
|
||||
flag.StringVar( |
||||
&bind, "bind", "0.0.0.0:8000", |
||||
"[int]:<port> to bind to for HTTP", |
||||
) |
||||
|
||||
flag.StringVar(&dockerurl, "dockerurl", "", "Docker URL to connect to") |
||||
|
||||
flag.BoolVar(&tls, "tls", false, "Use TLS; implied by --tlsverify") |
||||
flag.StringVar( |
||||
&tlscacert, "tlscacert", "", |
||||
"Trust certs signed only by this CA", |
||||
) |
||||
flag.StringVar( |
||||
&tlscert, "tlscert", "", |
||||
"Path to TLS certificate file", |
||||
) |
||||
flag.StringVar( |
||||
&tlskey, "tlskey", "", |
||||
"Path to TLS key file", |
||||
) |
||||
flag.BoolVar( |
||||
&tlsverify, "tlsverify", true, |
||||
"Use TLS and verify the remote", |
||||
) |
||||
|
||||
flag.Parse() |
||||
|
||||
if version { |
||||
fmt.Printf("autodock v%s", pkgver.FullVersion()) |
||||
os.Exit(0) |
||||
} |
||||
|
||||
if debug { |
||||
log.SetLevel(log.DebugLevel) |
||||
} |
||||
|
||||
cfg := &config.Config{ |
||||
Debug: debug, |
||||
|
||||
Bind: bind, |
||||
|
||||
DockerURL: dockerurl, |
||||
TLSCACert: tlscacert, |
||||
TLSCert: tlscert, |
||||
TLSKey: tlskey, |
||||
AllowInsecure: !tlsverify, |
||||
} |
||||
|
||||
srv, err := server.NewServer(cfg) |
||||
if err != nil { |
||||
log.Fatal(err) |
||||
} |
||||
|
||||
if err := srv.Run(); err != nil { |
||||
log.Fatal(err) |
||||
} |
||||
} |
@ -0,0 +1,12 @@ |
||||
package config |
||||
|
||||
// Config carries the runtime settings for autodock, populated from
// command-line flags (or their environment/config-file equivalents)
// in main.
type Config struct {
	Debug         bool   // enable debug-level logging
	Bind          string // address:port for the HTTP server
	DockerURL     string // Docker API endpoint; empty means auto-detect
	TLSCACert     string // path to the CA certificate for the Docker API
	TLSCert       string // path to the client TLS certificate
	TLSKey        string // path to the client TLS key
	AllowInsecure bool   // skip TLS verification of the Docker endpoint
}
@ -0,0 +1,14 @@ |
||||
package config |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/stretchr/testify/assert" |
||||
) |
||||
|
||||
func TestZeroConfig(t *testing.T) { |
||||
assert := assert.New(t) |
||||
|
||||
cfg := Config{} |
||||
assert.True(true) |
||||
} |
@ -0,0 +1,21 @@ |
||||
version: "3.3" |
||||
|
||||
services: |
||||
autodock: |
||||
image: prologic/autodock:go-port |
||||
command: -debug |
||||
networks: |
||||
- autodock |
||||
volumes: |
||||
- /var/run/docker.sock:/var/run/docker.sock |
||||
deploy: |
||||
placement: |
||||
constraints: |
||||
- "node.role == manager" |
||||
restart_policy: |
||||
condition: on-failure |
||||
replicas: 1 |
||||
|
||||
< |