
upload

master
Satrio Arditama 3 years ago
parent
commit
3ad678f910
17 changed files with 2444 additions and 620 deletions
  1. Dockerfile  +12  -0
  2. LICENSE  +719  -618
  3. Makefile  +44  -0
  4. README.md  +272  -2
  5. build-tests.sh  +5  -0
  6. config.json  +80  -0
  7. doc/ipo.md  +240  -0
  8. getting-started.md  +327  -0
  9. go.mod  +13  -0
  10. go.sum  +34  -0
  11. main.go  +344  -0
  12. run-tests.sh  +4  -0
  13. test/Dockerfile  +34  -0
  14. test/docker-compose.yml  +31  -0
  15. test/docker-entrypoint.sh  +11  -0
  16. test/test-environment.sh  +27  -0
  17. test/test-run.sh  +247  -0

Dockerfile (+12, -0)

@@ -0,0 +1,12 @@
FROM golang:1.14
COPY . /app/
WORKDIR /app
RUN go build

FROM bitnami/minideb:stretch
RUN apt-get update && \
    apt-get install lizardfs-client -y && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
COPY --from=0 /app/lizardfs-volume-plugin /usr/bin/
ENTRYPOINT ["lizardfs-volume-plugin"]

LICENSE (+719, -618)

File diff suppressed because it is too large


Makefile (+44, -0)

@@ -0,0 +1,44 @@
PLUGIN_NAME = docker.inweb.id/lizardfs-volume-plugin
PLUGIN_TAG ?= 3.10
TRAVIS_BUILD_NUMBER ?= local

all: clean rootfs create

clean:
	@echo "### rm ./plugin"
	@rm -rf ./plugin

config:
	@echo "### copy config.json to ./plugin/"
	@mkdir -p ./plugin
	@cp config.json ./plugin/

rootfs: config
	@echo "### docker build: rootfs image with"
	@docker build -t ${PLUGIN_NAME}:rootfs .
	@echo "### create rootfs directory in ./plugin/rootfs"
	@mkdir -p ./plugin/rootfs
	@docker create --name tmp ${PLUGIN_NAME}:rootfs
	@docker export tmp | tar -x -C ./plugin/rootfs
	@docker rm -vf tmp

create:
	@echo "### remove existing plugin ${PLUGIN_NAME}:${PLUGIN_TAG} if exists"
	@docker plugin rm -f ${PLUGIN_NAME}:${PLUGIN_TAG} || true
	@docker plugin rm -f ${PLUGIN_NAME}:${TRAVIS_BUILD_NUMBER} || true
	@echo "### create new plugin ${PLUGIN_NAME}:${PLUGIN_TAG} from ./plugin"
	@docker plugin create ${PLUGIN_NAME}:${PLUGIN_TAG} ./plugin
	@docker plugin create ${PLUGIN_NAME}:${TRAVIS_BUILD_NUMBER} ./plugin

enable:
	@echo "### enable plugin ${PLUGIN_NAME}:${PLUGIN_TAG}"
	@docker plugin enable ${PLUGIN_NAME}:${PLUGIN_TAG}

disable:
	@echo "### disable plugin ${PLUGIN_NAME}:${PLUGIN_TAG}"
	@docker plugin disable ${PLUGIN_NAME}:${PLUGIN_TAG}

push: clean rootfs create
	@echo "### push plugin ${PLUGIN_NAME}:${PLUGIN_TAG}"
	@docker plugin push ${PLUGIN_NAME}:${TRAVIS_BUILD_NUMBER}
	@docker plugin push ${PLUGIN_NAME}:${PLUGIN_TAG}

README.md (+272, -2)

@@ -1,3 +1,273 @@
# lizardfs-docker-volume-plugin
# LizardFS Docker Plugin
LizardFS 3.10 volume plugin for Docker
A Docker volume driver plugin for mounting a [LizardFS](https://lizardfs.com) filesystem. Allows you to transparently provide storage for your Docker containers using LizardFS. This plugin can be used in combination with our [LizardFS Docker Image](https://github.com/kadimasolutions/docker_lizardfs) to create a fully containerized, clustered storage solution for Docker Swarm. Documentation and development are still in progress. A guide for getting started with Swarm can be found in [Getting Started](getting-started.md). The Swarm usage will likely be changed soon in favor of combining the LizardFS services with the plugin.
[![Build Status](https://travis-ci.org/lizardfs/lizardfs-docker-volume-plugin.svg?branch=master)](https://travis-ci.org/lizardfs/lizardfs-docker-volume-plugin)
## Usage
### Prerequisites
Before you can use the plugin you must have:
* A running LizardFS cluster that your Docker host can access.
* A directory on the LizardFS filesystem that can be used by the plugin to store Docker volumes. This can be any normal directory. By default the plugin will use `/docker/volumes`, but this can be changed ( see [REMOTE_PATH](#remote-path) ).
Once these conditions are met you are ready to install the plugin.
### Installation
The plugin is simple to use and can be installed as a Docker container without having to install any other system dependencies.
$ docker plugin install --alias lizardfs kadimasolutions/lizardfs-volume-plugin HOST=mfsmaster PORT=9421
Docker will prompt asking if you want to grant the permissions required to run the plugin. Select yes and the plugin will download and install.
> **Note:** We set the plugin alias to `lizardfs`. This is completely optional, but it allows us to refer to the plugin with a much shorter name. Throughout this readme, when reference is made to the `lizardfs` driver, it is referring to this alias.
That's it! You can now see your newly installed Docker plugin by running `docker plugin ls`.
$ docker plugin ls
ID NAME DESCRIPTION ENABLED
4a08a23cf2eb lizardfs:latest LizardFS volume plugin for Docker true
You should now be able to create a Docker volume using our new `lizardfs` driver.
$ docker volume create --driver lizardfs lizard-vol
lizard-vol
You can see it by running `docker volume ls`.
$ docker volume ls
DRIVER VOLUME NAME
lizardfs:latest lizard-vol
Now that you have created the volume you can mount it into a container using its name. Let's mount it into an alpine container and put some data in it.
```sh
$ docker run -it --rm -v lizard-vol:/data alpine sh
/ $ cd /data # Switch to our volume mountpoint
/data $ cp -R /etc . # Copy the whole container /etc directory to it
/data $ ls # See that the copy was successful
etc
/data $ exit # Exit ( the container will be removed because of the --rm )
```
We should now have a copy of the alpine container's whole `/etc` directory on our `lizard-vol` volume. You can verify this by checking the `/docker/volumes/lizard-vol/` directory on your LizardFS installation. You should see the `etc` folder with all of its files and folders in it. Congratulations! You have successfully mounted your LizardFS filesystem into a Docker container and stored data in it!
If you run another container, you can mount the same volume into it and that container will also see the data. Your data will stick around as long as that volume exists. When you are done with it, you can remove the volume by running `docker volume rm lizard-vol`.
### Features
#### Shared Mounts
Any number of containers on any number of hosts can mount the same volume at the same time. The only requirement is that each Docker host have the LizardFS plugin installed on it.
#### Transparent Data Storage ( No Hidden Metadata )
Each LizardFS Docker volume maps 1-to-1 to a directory on the LizardFS filesystem. All directories in the [REMOTE_PATH](#remote-path) on the LizardFS filesystem will be exposed as a Docker volume regardless of whether or not the directory was created by running `docker volume create`. There is no special metadata or any other extra information used by the plugin to keep track of what volumes exist. If there is a directory there, it is a Docker volume and it can be mounted ( and removed ) by the LizardFS plugin. This makes it easy to understand and allows you to manage your Docker volumes directly on the filesystem, if necessary, for things like backup and restore.
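For reference, the `List` handler in `main.go` works exactly this way: the volume list is essentially a directory listing of the volume root. Here is a trimmed, standalone sketch of that idea ( the root path is hard-coded for illustration; the real handler also reports mountpoints ):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	// Every directory under the volume root is reported as a Docker volume,
	// whether or not it was created through `docker volume create`.
	const volumeRoot = "/mnt/lizardfs/" // where the plugin mounts REMOTE_PATH
	entries, err := ioutil.ReadDir(volumeRoot)
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		if entry.IsDir() {
			fmt.Println("volume:", entry.Name())
		}
	}
}
```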
#### LizardFS Global Trash Bin
Using LizardFS for your Docker volumes means that you now get the benefit of LizardFS's global trash bin. Removed files and volumes can be restored using LizardFS's [trash bin](https://docs.lizardfs.com/adminguide/advanced_configuration.html?highlight=trash#mounting-the-meta-data) mechanism. Note that the plugin itself has nothing to do with this; it is a native feature of LizardFS.
#### Multiple LizardFS Clusters
It is also possible, if you have multiple LizardFS clusters, to install the plugin multiple times with different settings for the different clusters. For example, if you have two LizardFS clusters, one at `mfsmaster1` and another at `mfsmaster2`, you can install the plugin two times, with different aliases, to allow you to create volumes on both clusters.
$ docker plugin install --alias lizardfs1 --grant-all-permissions kadimasolutions/lizardfs-volume-plugin HOST=mfsmaster1 PORT=9421
$ docker plugin install --alias lizardfs2 --grant-all-permissions kadimasolutions/lizardfs-volume-plugin HOST=mfsmaster2 PORT=9421
This gives you the ability to create volumes for both clusters by specifying either `lizardfs1` or `lizardfs2` as the volume driver when creating a volume.
#### Root Mount Option
The plugin has the ability to provide a volume that contains *all* of the LizardFS Docker volumes in it. This is called the Root Volume and is identical to mounting the configured `REMOTE_PATH` on your LizardFS filesystem into your container. This volume does not exist by default. The Root Volume is enabled by setting the `ROOT_VOLUME_NAME` to the name that you want the volume to have. You should pick a name that does not conflict with any other volume. If there is a volume with the same name as the Root Volume, the Root Volume will take precedence over the other volume.
There are a few different uses for the Root Volume. Kadima Solutions designed the Root Volume feature to accommodate containerized backup solutions. By mounting the Root Volume into a container that manages your backups, you can back up *all* of your LizardFS Docker volumes without having to manually add a mount to the container every time you create a new volume that needs to be backed up.
The Root Volume also gives you the ability to have containers create and remove LizardFS volumes without having to mount the Docker socket and make Docker API calls. Volumes can be added, removed, and otherwise manipulated simply by mounting the Root Volume and making the desired changes.
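In `main.go`, the Root Volume is just a special case of the volume name when the remote subfolder is chosen. A small sketch of that decision ( `resolveRemotePath` and the example names are hypothetical; the logic mirrors the `Mount` handler ):

```go
package main

import (
	"fmt"
	"path"
)

// resolveRemotePath picks which LizardFS subfolder is mounted for a volume.
// The Root Volume maps to REMOTE_PATH itself, so it contains every other
// volume directory; any other name maps to a subdirectory of REMOTE_PATH.
func resolveRemotePath(remotePath, rootVolumeName, volumeName string) string {
	if rootVolumeName != "" && volumeName == rootVolumeName {
		return remotePath
	}
	return path.Join(remotePath, volumeName)
}

func main() {
	fmt.Println(resolveRemotePath("/docker/volumes", "lizardfs-root", "lizardfs-root")) // /docker/volumes
	fmt.Println(resolveRemotePath("/docker/volumes", "lizardfs-root", "lizard-vol"))    // /docker/volumes/lizard-vol
}
```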
### Known Issues
#### Hangs on Unresponsive LizardFS Master
In most cases, when the plugin cannot connect to the LizardFS cluster, the plugin will time out quickly and simply fail to create mounts or listings of volumes. However, when the plugin *has* been able to open a connection with the LizardFS master, and the LizardFS master subsequently fails to respond, a volume list operation will cause the plugin to hang for a period of time. This will cause any Docker operations that request the volume list to freeze while the plugin attempts to connect to the cluster. To fix the issue, connectivity to the LizardFS master must be restored; otherwise, the plugin should be disabled to prevent stalling the Docker daemon.
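For context, several driver operations in `main.go` are wrapped in a timeout guard: the work runs in a goroutine and the handler gives up after `CONNECT_TIMEOUT`, which is what bounds the hang described above. A simplified sketch of that pattern ( the stalled goroutine itself can still linger on an unresponsive FUSE mount ):

```go
package main

import (
	"errors"
	"fmt"
	"io/ioutil"
	"time"
)

// listWithTimeout runs the directory listing in a goroutine and gives up
// after the timeout instead of blocking the Docker daemon indefinitely.
func listWithTimeout(volumeRoot string, timeout time.Duration) ([]string, error) {
	names := make(chan []string, 1)
	errs := make(chan error, 1)
	go func() {
		entries, err := ioutil.ReadDir(volumeRoot)
		if err != nil {
			errs <- err
			return
		}
		var result []string
		for _, e := range entries {
			result = append(result, e.Name())
		}
		names <- result
	}()
	select {
	case res := <-names:
		return res, nil
	case err := <-errs:
		return nil, err
	case <-time.After(timeout):
		return nil, errors.New("list operation timeout")
	}
}

func main() {
	vols, err := listWithTimeout("/mnt/lizardfs/", 10*time.Second)
	fmt.Println(vols, err)
}
```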
## Configuration
### Plugin Configuration
You can configure the plugin through plugin variables. You may set these variables at installation time by putting `VARIABLE_NAME=value` after the plugin name, or you can set them after the plugin has been installed using `docker plugin set kadimasolutions/lizardfs-volume-plugin VARIABLE_NAME=value`.
> **Note:** When configuring the plugin after installation, the plugin must first be disabled before you can set variables. There is no danger of accidentally setting variables while the plugin is enabled, though. Docker will simply tell you that it is not possible.
#### HOST
The hostname/ip address that will be used when connecting to the LizardFS master.
> **Note:** The plugin runs in `host` networking mode. This means that even though it is in a container, it shares its network configuration with the host and should resolve all network addresses as the host system would.
**Default:** `mfsmaster`
#### PORT
The port on which to connect to the LizardFS master.
**Default:** `9421`
#### MOUNT_OPTIONS
Options passed to the `mfsmount` command when mounting LizardFS volumes. More information can be found in the [LizardFS documentation](https://docs.lizardfs.com/man/mfsmount.1.html).
**Default:** empty string
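The value is passed straight through to `mfsmount`: in `main.go` the string is split on single spaces and appended to the argument list after the master, port, and subfolder options. A sketch of that argument construction ( `buildMountArgs` is a hypothetical wrapper around the same logic, and the example options are illustrative ):

```go
package main

import (
	"fmt"
	"strings"
)

// buildMountArgs assembles the mfsmount argument list the way main.go does:
// MOUNT_OPTIONS is split on spaces and appended verbatim, followed by the
// local mountpoint.
func buildMountArgs(host, port, subfolder, mountOptions, mountpoint string) []string {
	args := []string{
		"-o", "mfsmaster=" + host,
		"-o", "mfsport=" + port,
		"-o", "mfssubfolder=" + subfolder,
	}
	if mountOptions != "" {
		args = append(args, strings.Split(mountOptions, " ")...)
	}
	return append(args, mountpoint)
}

func main() {
	fmt.Println(buildMountArgs("mfsmaster", "9421", "/docker/volumes",
		"-o big_writes", "/mnt/lizardfs/"))
}
```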
#### REMOTE_PATH
The path on the LizardFS filesystem that Docker volumes will be stored in. This path will be mounted for volume storage by the plugin and must exist on the LizardFS filesystem. The plugin will fail to connect to the master server if the path does not exist.
**Default:** `/docker/volumes`
#### ROOT_VOLUME_NAME
The name of the Root Volume. If specified, a special volume with the given name will be created that contains all of the LizardFS volumes. It is equivalent to mounting the `REMOTE_PATH` on the LizardFS filesystem. See [Root Mount Option](#root-mount-option).
**Default:** empty string
#### CONNECT_TIMEOUT
The timeout for LizardFS mount commands. If a mount takes longer than the `CONNECT_TIMEOUT` in milliseconds, it will be terminated and the volume will not be mounted. This is to keep Docker operations from hanging in the event of an unresponsive master.
**Default:** `10000`
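In `main.go` this timeout is applied by running `mfsmount` under a cancellable context, roughly as follows ( a sketch; `runMountWithTimeout` is a hypothetical wrapper and assumes `mfsmount` is on the PATH ):

```go
package main

import (
	"context"
	"log"
	"os/exec"
	"time"
)

// runMountWithTimeout bounds an mfsmount call the way main.go does: the
// command runs under a context that is cancelled after the timeout, so an
// unreachable master terminates the mount attempt instead of hanging.
func runMountWithTimeout(connectTimeoutMs int, args ...string) error {
	ctx, cancel := context.WithTimeout(context.Background(),
		time.Duration(connectTimeoutMs)*time.Millisecond)
	defer cancel()
	return exec.CommandContext(ctx, "mfsmount", args...).Run()
}

func main() {
	err := runMountWithTimeout(10000, "-o", "mfsmaster=mfsmaster", "/mnt/lizardfs/")
	if err != nil {
		log.Fatal(err)
	}
}
```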
#### LOG_LEVEL
Plugin logging level. Set to `DEBUG` to get more verbose log messages. Logs from Docker plugins can be found in the Docker log and will be suffixed with the plugin ID.
**Default:** `INFO`
### Volume Options
Volume options are options that can be passed to Docker when creating a Docker volume. Volume options are set per volume, therefore setting an option for one volume does not set that option for any other volume.
Volume options can be passed in on the command line by adding `-o OptionName=value` after the volume name. For example:
$ docker volume create -d lizardfs my-volume -o ReplicationGoal=3
#### ReplicationGoal
The replication goal option can be used to set the LizardFS replication goal on a newly created volume. The goal can be any valid goal name or number that exists on the LizardFS master. See the LizardFS [documentation](https://docs.lizardfs.com/adminguide/replication.html) for more information.
Note that even after a volume has been created and a goal has been set, it is still possible to change the goal of the volume directory on the LizardFS filesystem manually. For example, assuming you have mounted the LizardFS filesystem manually ( not using a Docker volume ):
lizardfs setgoal goal_name /mnt/mfs/docker/volumes/volume_name
Also, if you want to set a default goal for all of your Docker volumes, you can manually set the goal of the directory containing your docker volumes on the LizardFS filesystem ( `/docker/volumes` by default, see [REMOTE_PATH](#remote-path) ).
**Default:** empty string
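For reference, this is approximately what the `Create` handler in `main.go` does with the option: as committed here it only applies the goal when the value parses as a number, shelling out to `lizardfs setgoal -r` on the new volume directory ( sketch with error handling trimmed; requires the `lizardfs` CLI ):

```go
package main

import (
	"log"
	"os/exec"
	"strconv"
)

// applyReplicationGoal mirrors the Create handler: the ReplicationGoal option
// is applied with `lizardfs setgoal -r` when it parses as a number; other
// values are currently ignored by the handler.
func applyReplicationGoal(goal, volumePath string) error {
	if _, err := strconv.Atoi(goal); err != nil {
		return nil
	}
	return exec.Command("lizardfs", "setgoal", "-r", goal, volumePath).Run()
}

func main() {
	if err := applyReplicationGoal("3", "/mnt/lizardfs/my-volume"); err != nil {
		log.Fatal(err)
	}
}
```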
## Development
Docker plugins are made up of a `config.json` file and a `rootfs` directory. The `config.json` has all of the metadata and information about the plugin that Docker needs when installing and configuring the plugin. The `rootfs` is the root filesystem of the plugin container. Unfortunately, the Docker CLI doesn't allow you to create Docker plugins using a Dockerfile, so we use a Makefile to automate the process of creating the plugin `rootfs` from a Dockerfile.
### Building the Plugin
To build the plugin simply run `make rootfs` in the project directory.
$ make rootfs
This will build the Dockerfile, export the new Docker image's rootfs, and copy the rootfs and the config.json file to the `plugin` directory. When it is done you should have a new plugin directory with a config.json file and a rootfs folder in it.
```
plugin/
    config.json
    rootfs/
```
After that is finished you can run `make create`.
$ make create
This will install the Docker plugin from the `plugin` directory with the name `kadimasolutions/lizardfs-volume-plugin`.
Finally run `make enable` to start the plugin.
$ make enable
Here is a list of the `make` targets:
* **clean**: Remove the `plugin` directory
* **config**: Copy the `config.json` file to the `plugin` directory
* **rootfs**: Generate the plugin rootfs from the Dockerfile and put it in the `plugin` directory with the `config.json`
* **create**: Install the plugin from the `plugin` directory
* **enable**: Enable the plugin
* **disable**: Disable the plugin
* **push**: Run the `clean`, `rootfs`, and `create` targets, and push the plugin to DockerHub
### Running the tests
The automated tests for the plugin are run using a Docker-in-Docker container that creates a Dockerized LizardFS cluster to test the plugin against. When you run the test container, it will install the plugin inside the Docker-in-Docker container and proceed to create a Dockerized LizardFS cluster in it as well. A shell script is run that manipulates the plugin and runs containers to ensure the plugin behaves as expected.
Before you can run the tests, the test Docker image must first be built. This is done by running the `build-tests.sh` script.
$ ./build-tests.sh
This will build a Docker image, `lizardfs-volume-plugin_test`, using the Dockerfile in the `test` directory. After the image has been built, you can use it to run the tests against the plugin. This is done with the `run-tests.sh` script.
$ ./run-tests.sh
By default running `run-tests.sh` will install the plugin from the `plugin` directory before running the tests against it. This means that you must first build the plugin by running `make rootfs`, if you have not already done so. Alternatively, you can also run the tests against a version of the plugin from DockerHub by passing in the plugin tag as a parameter to the `run-tests.sh` script.
$ ./run-tests.sh kadimasolutions/lizardfs-volume-plugin:latest
This will download the plugin from DockerHub and run the tests against that version of the plugin.
### Tips & Tricks
If you don't have a fast disk on your development machine, developing Docker plugins can be somewhat tricky, because it can take some time to build and install the plugin every time you need to make a change. Here are some tricks that you can use to help maximize your development time.
#### Patching the Plugin Rootfs
All of the plugin logic is in the `main.go` file, which is compiled into the `lizardfs-volume-plugin` binary. During development it can take a long time to rebuild the entire plugin every time you need to test a change. To get around this, it is possible to copy just the rebuilt binary into the installed plugin without having to reinstall the entire plugin.
When you install a Docker plugin, it is given a plugin ID. You can see the first 12 characters of the plugin ID by running `docker plugin ls`.
```
$ docker plugin ls
ID NAME DESCRIPTION ENABLED
2f5b68535b92 kadimasolutions/lizardfs-volume-plugin:latest LizardFS volume plugin for Docker false
```
Using that ID you can find where the plugin's rootfs was installed. By default, it should be located in `/var/lib/docker/plugins/[pluginID]/rootfs`. For our particular plugin, the file that we need to replace is the `lizardfs-volume-plugin` binary at `/usr/bin/` in the plugin's rootfs. By replacing that file with an updated build and restarting ( disabling and re-enabling ) the plugin, you can update the plugin without having to re-install it.
#### Exec-ing Into the Plugin Container
It may be useful during development to exec into the plugin container while it is running. You can find out how in the [Docker Documentation](https://docs.docker.com/engine/extend/#debugging-plugins).
#### Test Case Development
Writing new automated test cases for the plugin can also be difficult because of the time required for the test container to start. When writing new test cases for the plugin, it may be useful to start the container and interactively run the tests. If you make a mistake that causes a test to fail, even though the plugin *is* working, you can still edit and re-run the tests without having to restart the test container completely.
Once you have built the test image using the `build-tests.sh` script, you need to run the test container as a daemon that you can exec into. We override the entrypoint of the container so that it won't run the test script as soon as it starts. We want it just to sit there and wait for us to run commands in it.
$ docker run -it --rm -d --name lizardfs-test --privileged \
    -v $(pwd)/plugin:/plugin \
    -v $(pwd)/test/test-run.sh:/test-run.sh \
    --entrypoint=sh \
    lizardfs-volume-plugin_test
> **Note:** We also mount our `test-run.sh` script into the container so that updates to the script are reflected immediately in the container.
After the container is running we can shell into it and run the script that starts up Docker.
$ docker exec -it lizardfs-test sh
/project # /test-environment.sh
This will start Docker, load the LizardFS image used for creating the test LizardFS environment, and install the plugin from the plugin directory. Once this is done you can run the tests.
/project # sh /test-run.sh
This will run through all of the tests. If the tests fail, you can still edit and re-run the `test-run.sh` script without having to re-install the plugin.
When you are done writing your test cases, you can `exit` the shell and `docker stop lizardfs-test`. The container will be automatically removed after it stops. You should make sure that your tests still run correctly in a completely fresh environment by rebuilding and re-running the tests using the `build-tests.sh` and `run-tests.sh` scripts.

build-tests.sh (+5, -0)

@@ -0,0 +1,5 @@
#!/usr/bin/env bash
pushd test
docker build \
  -t lizardfs-volume-plugin_test .
popd

config.json (+80, -0)

@@ -0,0 +1,80 @@
{
  "description": "LizardFS 3.10 volume plugin for Docker",
  "documentation": "https://docs.docker.com/engine/extend/plugins/",
  "workdir": "/app",
  "entrypoint": [
    "lizardfs-volume-plugin"
  ],
  "env": [
    {
      "name": "HOST",
      "settable": [
        "value"
      ],
      "value": "mfsmaster"
    },
    {
      "name": "PORT",
      "settable": [
        "value"
      ],
      "value": "9421"
    },
    {
      "name": "ROOT_VOLUME_NAME",
      "settable": [
        "value"
      ],
      "value": ""
    },
    {
      "name": "MOUNT_OPTIONS",
      "settable": [
        "value"
      ],
      "value": ""
    },
    {
      "name": "REMOTE_PATH",
      "settable": [
        "value"
      ],
      "value": "/docker/volumes"
    },
    {
      "name": "CONNECT_TIMEOUT",
      "settable": [
        "value"
      ],
      "value": "10000"
    },
    {
      "name": "LOG_LEVEL",
      "settable": [
        "value"
      ],
      "value": "info"
    }
  ],
  "interface": {
    "socket": "lizardfs310.sock",
    "types": [
      "docker.volumedriver/2.0"
    ]
  },
  "linux": {
    "capabilities": [
      "CAP_SYS_ADMIN"
    ],
    "devices": [
      {
        "path": "/dev/fuse"
      }
    ]
  },
  "network": {
    "type": "host"
  },
  "propagatedMount": "/mnt/docker-volumes"
}

doc/ipo.md (+240, -0)

@@ -0,0 +1,240 @@
# IPO Outline
This document outlines the basic Input-Process-Output flow of the volume plugin.
## Environment
The LizardFS Docker plugin implements the [Docker Plugin API](https://docs.docker.com/engine/extend/plugin_api/). The Inputs to the program are requests made by the Docker daemon to the plugin. Requests such as `Plugin.Activate` and `VolumeDriver.Create` will be sent by the Docker daemon to the Unix socket, `/run/docker/plugins/lizardfs.sock`, and the LizardFS Docker plugin will process the request, take the required actions, and respond with an appropriate response.
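For orientation, the Go implementation wires these requests up with the `volume` package from `docker/go-plugins-helpers`: the driver type implements the `volume.Driver` interface and is served on the plugin socket. A minimal sketch ( stub driver with no LizardFS logic; the socket name comes from `config.json`, and the exact `main()` wiring and group ID are assumptions since they are not shown in this diff ):

```go
package main

import (
	"log"

	"github.com/docker/go-plugins-helpers/volume"
)

// stubDriver satisfies volume.Driver; each method corresponds to one of the
// VolumeDriver.* requests described below.
type stubDriver struct{}

func (stubDriver) Create(*volume.CreateRequest) error  { return nil }
func (stubDriver) List() (*volume.ListResponse, error) { return &volume.ListResponse{}, nil }
func (stubDriver) Get(*volume.GetRequest) (*volume.GetResponse, error) {
	return &volume.GetResponse{}, nil
}
func (stubDriver) Remove(*volume.RemoveRequest) error { return nil }
func (stubDriver) Path(*volume.PathRequest) (*volume.PathResponse, error) {
	return &volume.PathResponse{}, nil
}
func (stubDriver) Mount(*volume.MountRequest) (*volume.MountResponse, error) {
	return &volume.MountResponse{}, nil
}
func (stubDriver) Unmount(*volume.UnmountRequest) error { return nil }
func (stubDriver) Capabilities() *volume.CapabilitiesResponse {
	return &volume.CapabilitiesResponse{Capabilities: volume.Capability{Scope: "global"}}
}

func main() {
	h := volume.NewHandler(stubDriver{})
	// ServeUnix creates the socket under /run/docker/plugins/ ( lizardfs310.sock )
	// and answers the Plugin.Activate and VolumeDriver.* requests on it.
	log.Fatal(h.ServeUnix("lizardfs310", 0))
}
```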
## Requests
These are the requests that Docker will make to the plugin over the Unix socket. All requests will be HTTP POST requests and may contain a JSON payload. The plugin's response to the request should also be a JSON payload if applicable. Details about these requests can be found in the Docker documentation for the [Plugins API](https://docs.docker.com/engine/extend/plugin_api/) and the [Volume Plugin API](https://docs.docker.com/engine/extend/plugins_volume/#volumedrivercapabilities).
### /Plugin.Activate
#### Input
Empty payload.
#### Process
* Mount a subpath of the LizardFS filesystem specified by the `REMOTE_PATH` environment variable ( `/docker/volumes` by default ) to `/mnt/lizardfs`. This is where the Docker volumes will be stored. The `/mnt/lizardfs` directory will be referred to as the "volume root" throughout this document.
#### Output
```json
{
"Implements": ["VolumeDriver"]
}
```
### /VolumeDriver.Create
#### Input
```json
{
  "Name": "volume_name",
  "Opts": {
    "ReplicationGoal": "replication_goal_number_or_name"
  }
}
```
#### Process
* Create sub-directory of volume root with the given `Name`. For example, `/mnt/lizardfs/volume_name`.
* Use `lizardfs setgoal` to set the replication goal for that Docker Volume to the value specified in the `Opts` ( if specified ).
#### Output
Error message ( if one occurred ).
```json
{
"Err": ""
}
```
### /VolumeDriver.Remove
#### Input
```json
{
"Name": "volume_name"
}
```
#### Process
* Delete the directory in the volume root with the given `Name`. For example, `/mnt/lizardfs/volume_name`.
#### Output
Error message ( if one occurred ).
```json
{
"Err": ""
}
```
### /VolumeDriver.Mount
#### Input
```json
{
"Name": "volume_name",
"ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c"
}
```
#### Process
* Create a directory outside of the LizardFS root mountpoint using the given `Name`, such as `/mnt/docker-volumes/volume_name`.
* Mount the subpath of the LizardFS filesystem ( for example, `/docker/volumes/volume_name` ) to the newly created mountpoint.
* Add the `ID` to the list of containers that have mounted `Name` in the plugin's in-memory `mounted` map. This map is used to keep track of which containers have mounted the volume.
#### Output
We need to tell Docker where we mounted the volume or give an error message if there was a problem.
```json
{
"Mountpoint": "/mnt/docker-volumes/volume_name",
"Err": ""
}
```
### /VolumeDriver.Path
#### Input
```json
{
"Name": "volume_name"
}
```
#### Process
* Determine the path at which the volume is mounted based on the `Name`.
#### Output
Error message ( if one occurred ).
```json
{
"Mountpoint": "/mnt/docker-volumes/volume_name",
"Err": ""
}
```
### /VolumeDriver.Unmount
#### Input
```json
{
"Name": "volume_name",
"ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c"
}
```
#### Process
* Remove the `ID` from the list of containers that have mounted `Name` in the plugin's in-memory `mounted` map.
* If there are no containers in the list anymore, unmount the `/mnt/docker-volumes/volume_name` because it no longer needs to be mounted.
#### Output
Error message ( if one occurred ).
```json
{
"Err": ""
}
```
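The bookkeeping behind the Mount/Unmount pair is a per-volume list of container IDs ( the `mounted` map in `main.go` ); the FUSE mount is only removed once that list is empty. A standalone sketch of that reference counting ( the helper names are hypothetical ):

```go
package main

import "fmt"

// mounted maps a volume name to the IDs of the containers that currently
// have it mounted, as in main.go.
var mounted = make(map[string][]string)

// addMount records a mount request and reports whether the underlying
// mfsmount needs to be created ( first user of the volume ).
func addMount(volumeName, id string) (needsMount bool) {
	needsMount = len(mounted[volumeName]) == 0
	mounted[volumeName] = append(mounted[volumeName], id)
	return needsMount
}

// removeMount drops a mount request and reports whether the mountpoint can
// now be unmounted ( no containers left ).
func removeMount(volumeName, id string) (canUnmount bool) {
	ids := mounted[volumeName]
	for i, v := range ids {
		if v == id {
			mounted[volumeName] = append(ids[:i], ids[i+1:]...)
			break
		}
	}
	return len(mounted[volumeName]) == 0
}

func main() {
	fmt.Println(addMount("volume_name", "container-a"))    // true: mount now
	fmt.Println(addMount("volume_name", "container-b"))    // false: already mounted
	fmt.Println(removeMount("volume_name", "container-a")) // false: still in use
	fmt.Println(removeMount("volume_name", "container-b")) // true: safe to unmount
}
```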
### /VolumeDriver.Get
#### Input
```json
{
"Name": "volume_name"
}
```
#### Process
* Make sure the volume exists: check that the directory with the name `volume_name` exists and that the process has read-write access.
* If the volume is mounted, return the mountpoint as well as the name.
#### Output
Return the volume name and mountpoint ( if mounted ).
```json
{
  "Volume": {
    "Name": "volume_name",
    "Mountpoint": "/mnt/docker-volumes/volume_name"
  },
  "Err": "Error if directory doesn't exist or we don't have read-write access to it."
}
```
### /VolumeDriver.List
#### Input
```json
{}
```
#### Process
* Get a list of the directories in the volume root: `/mnt/lizardfs/`.
* If the volume is mounted on the host, provide the `Mountpoint`.
#### Output
Error message ( if one occurred ).
```json
{
  "Volumes": [
    {
      "Name": "volume_name",
      "Mountpoint": "/mnt/docker-volumes/volume_name"
    }
  ],
  "Err": ""
}
```
### /VolumeDriver.Capabilities
#### Input
```json
{}
```
#### Process
Not applicable.
#### Output
```json
{
  "Capabilities": {
    "Scope": "global"
  }
}
```

getting-started.md (+327, -0)

@@ -0,0 +1,327 @@
Hello Everybody, I have recently developed a Docker plugin that allows you to create LizardFS Docker volumes! There are two different versions of the plugin: a Docker managed plugin that works well for individual Docker instances, and a version that can be deployed as a stack on Docker Swarm to create a self-contained storage solution for a Docker cluster.
The Docker plugin has been developed by me and my team at @kadimasolutions to create a distributed storage solution that can be deployed on Docker Swarm and provide shared volumes for the containers in the Docker Swarm cluster. As far as I have found it is **the only** solution that does so.
We will soon mirror the source code for the plugin to GitHub. In the meantime, you can test out the plugin using the Docker images that are on DockerHub. The plugin can be considered in beta and is, as far as I can tell, completely functional, but there may still be bugs or nuances that we have not yet found. Feedback is appreciated. :smiley: Updates to the image on DockerHub may be made without notice; I will try to mention any changes that I make here.
In addition, I will soon be attempting to get the Swarm deployment setup with the very latest highly available LizardFS master from the LizardFS 3.13 release candidate ( thanks to the folks at @lizardfs for getting that to me early ) so that the deployed LizardFS cluster will have automatic failover.
Here are detailed instructions for getting started with both versions of the plugin. If you need any help just comment on this thread and I will try to do what I can when I have the time.
# Docker Managed plugin
The Docker managed plugin can be installed very easily on any Docker host and is great for connecting your Docker containers to an existing LizardFS cluster.
> **Note:** If you don't have a LizardFS cluster yet, you may want to consider using the Swarm deployment instead. You can use the Docker Swarm deployment to create a LizardFS cluster out of your Docker hosts that will supply your Docker containers with shared LizardFS volumes that are distributed across your Docker cluster.
## Usage
### Prerequisites
Before you can use the plugin you must have:
* A running LizardFS cluster that your Docker host can access.
* A directory on the LizardFS filesystem that can be used by the plugin to store Docker volumes. This can be any normal directory. By default the plugin will use `/docker/volumes`, but this can be changed ( see [REMOTE_PATH](#remote-path) ).
Once these conditions are met you are ready to install the plugin.
### Installation
The plugin is simple to use and can be installed as a Docker container without having to install any other system dependencies.
$ docker plugin install --alias lizardfs kadimasolutions/lizardfs-volume-plugin HOST=mfsmaster PORT=9421
Docker will prompt asking if you want to grant the permissions required to run the plugin. Select yes and the plugin will download and install.
> **Note:** We set the plugin alias to `lizardfs`. This is completely optional, but it allows us to refer to the plugin with a much shorter name. Throughout this readme, when reference is made to the `lizardfs` driver, it is referring to this alias.
That's it! You can now see your newly installed Docker plugin by running `docker plugin ls`.
$ docker plugin ls
ID NAME DESCRIPTION ENABLED
4a08a23cf2eb lizardfs:latest LizardFS volume plugin for Docker true
You should now be able to create a Docker volume using our new `lizardfs` driver.
$ docker volume create --driver lizardfs lizard-vol
lizard-vol
You can see it by running `docker volume ls`.
$ docker volume ls
DRIVER VOLUME NAME
lizardfs:latest lizard-vol
Now that you have created the volume you can mount it into a container using its name. Let's mount it into an alpine container and put some data in it.
```sh
$ docker run -it --rm -v lizard-vol:/data alpine sh
/ $ cd /data # Switch to our volume mountpoint
/data $ cp -R /etc . # Copy the whole container /etc directory to it
/data $ ls # See that the copy was successful
etc
/data $ exit # Exit ( the container will be removed because of the --rm )
```
We should now have a copy of the alpine container's whole `/etc` directory on our `lizard-vol` volume. You can verify this by checking the `/docker/volumes/lizard-vol/` directory on your LizardFS installation. You should see the `etc` folder with all of its files and folders in it. Congratulations! You have successfully mounted your LizardFS filesystem into a Docker container and stored data in it!
If you run another container, you can mount the same volume into it and that container will also see the data. Your data will stick around as long as that volume exists. When you are done with it, you can remove the volume by running `docker volume rm lizard-vol`.
### Features
#### Shared Mounts
Any number of containers on any number of hosts can mount the same volume at the same time. The only requirement is that each Docker host have the LizardFS plugin installed on it.
#### Transparent Data Storage ( No Hidden Metadata )
Each LizardFS Docker volume maps 1-to-1 to a directory on the LizardFS filesystem. All directories in the [REMOTE_PATH](#remote-path) on the LizardFS filesystem will be exposed as a Docker volume regardless of whether or not the directory was created by running `docker volume create`. There is no special metadata or any other extra information used by the plugin to keep track of what volumes exist. If there is a directory there, it is a Docker volume and it can be mounted ( and removed ) by the LizardFS plugin. This makes it easy to understand and allows you to manage your Docker volumes directly on the filesystem, if necessary, for things like backup and restore.
#### LizardFS Global Trash Bin
Using LizardFS for your Docker volumes means that you now get the benefit of LizardFS's global trash bin. Removed files and volumes can be restored using LizardFS's [trash bin](https://docs.lizardfs.com/adminguide/advanced_configuration.html?highlight=trash#mounting-the-meta-data) mechanism. Note that the plugin itself has nothing to do with this; it is a native feature of LizardFS.
#### Multiple LizardFS Clusters
It is also possible, if you have multiple LizardFS clusters, to install the plugin multiple times with different settings for the different clusters. For example, if you have two LizardFS clusters, one at `mfsmaster1` and another at `mfsmaster2`, you can install the plugin two times, with different aliases, to allow you to create volumes on both clusters.
$ docker plugin install --alias lizardfs1 --grant-all-permissions kadimasolutions/lizardfs-volume-plugin HOST=mfsmaster1 PORT=9421
$ docker plugin install --alias lizardfs2 --grant-all-permissions kadimasolutions/lizardfs-volume-plugin HOST=mfsmaster2 PORT=9421
This gives you the ability to create volumes for both clusters by specifying either `lizardfs1` or `lizardfs2` as the volume driver when creating a volume.
#### Root Mount Option
The plugin has the ability to provide a volume that contains *all* of the LizardFS Docker volumes in it. This is called the Root Volume and is identical to mounting the configured `REMOTE_PATH` on your LizardFS filesystem into your container. This volume does not exist by default. The Root Volume is enabled by setting the `ROOT_VOLUME_NAME` to the name that you want the volume to have. You should pick a name that does not conflict with any other volume. If there is a volume with the same name as the Root Volume, the Root Volume will take precedence over the other volume.
There are a few different uses for the Root Volume. Kadima Solutions designed the Root Volume feature to accommodate containerized backup solutions. By mounting the Root Volume into a container that manages your backups, you can back up *all* of your LizardFS Docker volumes without having to manually add a mount to the container every time you create a new volume that needs to be backed up.
The Root Volume also gives you the ability to have containers create and remove LizardFS volumes without having to mount the Docker socket and make Docker API calls. Volumes can be added, removed, and otherwise manipulated simply by mounting the Root Volume and making the desired changes.
### Known Issues
#### Hangs on Unresponsive LizardFS Master
In most cases, when the plugin cannot connect to the LizardFS cluster, the plugin will time out quickly and simply fail to create mounts or listings of volumes. However, when the plugin *has* been able to open a connection with the LizardFS master, and the LizardFS master subsequently fails to respond, a volume list operation will cause the plugin to hang for a period of time. This will cause any Docker operations that request the volume list to freeze while the plugin attempts to connect to the cluster. To fix the issue, connectivity to the LizardFS master must be restored; otherwise, the plugin should be disabled to prevent stalling the Docker daemon.
## Configuration
### Plugin Configuration
You can configure the plugin through plugin variables. You may set these variables at installation time by putting `VARIABLE_NAME=value` after the plugin name, or you can set them after the plugin has been installed using `docker plugin set kadimasolutions/lizardfs-volume-plugin VARIABLE_NAME=value`.
> **Note:** When configuring the plugin after installation, the plugin must first be disabled before you can set variables. There is no danger of accidentally setting variables while the plugin is enabled, though. Docker will simply tell you that it is not possible.
#### HOST
The hostname/ip address that will be used when connecting to the LizardFS master.
> **Note:** The plugin runs in `host` networking mode. This means that even though it is in a container, it shares its network configuration with the host and should resolve all network addresses as the host system would.
**Default:** `mfsmaster`
#### PORT
The port on which to connect to the LizardFS master.
**Default:** `9421`
#### MOUNT_OPTIONS
Options passed to the `mfsmount` command when mounting LizardFS volumes. More information can be found in the [LizardFS documentation](https://docs.lizardfs.com/man/mfsmount.1.html).
**Default:** empty string
#### REMOTE_PATH
The path on the LizardFS filesystem that Docker volumes will be stored in. This path will be mounted for volume storage by the plugin and must exist on the LizardFS filesystem. The plugin will fail to connect to the master server if the path does not exist.
**Default:** `/docker/volumes`
#### ROOT_VOLUME_NAME
The name of the Root Volume. If specified, a special volume with the given name will be created that contains all of the LizardFS volumes. It is equivalent to mounting the `REMOTE_PATH` on the LizardFS filesystem. See [Root Mount Option](#root-mount-option).
**Default:** empty string
#### CONNECT_TIMEOUT
The timeout for LizardFS mount commands. If a mount takes longer than the `CONNECT_TIMEOUT` in milliseconds, it will be terminated and the volume will not be mounted. This is to keep Docker operations from hanging in the event of an unresponsive master.
**Default:** `10000`
#### LOG_LEVEL
Plugin logging level. Set to `DEBUG` to get more verbose log messages. Logs from Docker plugins can be found in the Docker log and will be suffixed with the plugin ID.
**Default:** `INFO`
### Volume Options
Volume options are options that can be passed to Docker when creating a Docker volume. Volume options are set per volume, therefore setting an option for one volume does not set that option for any other volume.
Volume options can be passed in on the command line by adding `-o OptionName=value` after the volume name. For example:
$ docker volume create -d lizardfs my-volume -o ReplicationGoal=3
#### ReplicationGoal
The replication goal option can be used to set the LizardFS replication goal on a newly created volume. The goal can be any valid goal name or number that exists on the LizardFS master. See the LizardFS [documentation](https://docs.lizardfs.com/adminguide/replication.html) for more information.
Note that even after a volume has been created and a goal has been set, it is still possible to change the goal of the volume directory on the LizardFS filesystem manually. For example, assuming you have mounted the LizardFS filesystem manually ( not using a Docker volume ):
lizardfs setgoal goal_name /mnt/mfs/docker/volumes/volume_name
Also, if you want to set a default goal for all of your Docker volumes, you can manually set the goal of the directory containing your docker volumes on the LizardFS filesystem ( `/docker/volumes` by default, see [REMOTE_PATH](#remote-path) ).
**Default:** empty string
# Swarm Deployment
Docker Swarm is where the LizardFS plugin shows its full potential. You can deploy an entire LizardFS cluster *and* the Docker volume plugin as a single stack on your Docker Swarm. This lets you create a shared storage cluster out of any Docker Swarm. There are a few steps to prepare your hosts before launching the stack.
## Usage
### Setup Master
One node in your Swarm cluster needs to have the label `lizardfs.master-personality=master`. This is the node that the LizardFS master will be deployed on.
The master server is also expected to have a directory `/lizardfs/mfsmaster` on the host that will be used to store the master data. In production this should be the mountpoint for an XFS or ZFS filesystem.
### Setup Chunkservers
Every node in the Swarm cluster gets a Chunkserver deployed to it. All servers are expected to have a `/lizardfs/chunkserver` directory that will be used for storing chunks. Like the master storage directory, `/lizardfs/chunkserver` should be formatted XFS or ZFS for production installations.
### ( Optional ) Setup Shadow Masters
You can optionally add the `lizardfs.master-personality=shadow` label to any nodes in the cluster that you want to run shadow masters on. Shadow master servers should have a `/lizardfs/mfsmaster-shadow` directory that is mounted to an XFS or ZFS filesystem for storage.
### Deploy The LizardFS Stack
> Note: Before you deploy the stack you should make sure that you have disabled the Docker managed version of the LizardFS plugin if it is installed.
> Note: Before you deploy the stack you should make sure that you have disabled the Docker managed version of the LizardFS plugin if it is installed. After you have provided the storage for your LizardFS cluster, you can deploy the LizardFS stack to your Swarm cluster by downloading the attached `lizardfs.yml` and using `docker stack deploy -c lizardfs.yml lizardfs`. The particular YAML I gave you requires that the name of the stack be `lizardfs`.
### Deploy the Stack
After you have set up the storage directories for your Swarm cluster, you can deploy the stack with the following YAML.
$ docker stack deploy -c docker-stack.yml lizardfs
> **Note:** The stack **must** be named `lizardfs` for this YAML. This is because the `docker-run-d` container has the network name `lizardfs_lizardfs` hard-coded into the YAML. Reading the "Swarm Service Privileges Workaround" explanation below will help explain the `docker-run-d` container.
**docker-stack.yml**
```yaml
version: '3.6'
services:
mfsmaster:
image: kadimasolutions/lizardfs:latest
command: master
environment:
MFSMASTER_AUTO_RECOVERY: 1
networks:
- lizardfs
volumes:
- /lizardfs/mfsmaster:/var/lib/mfs
deploy:
mode: global
placement:
constraints:
- node.labels.lizardfs.master-personality==master
mfsmaster-shadow:
image: kadimasolutions/lizardfs:latest
command: master
networks:
- lizardfs
environment:
MFSMASTER_PERSONALITY: shadow
volumes:
- /lizardfs/mfsmaster-shadow:/var/lib/mfs
deploy:
mode: global
placement:
constraints:
- node.labels.lizardfs.master-personality==shadow
chunkserver:
image: kadimasolutions/lizardfs:latest
command: chunkserver
networks:
- lizardfs
environment:
# This lets you run the chunkserver with less available disk space
MFSCHUNKSERVER_HDD_LEAVE_SPACE_DEFAULT: 400Mi # 4Gi is the default
MFSHDD_1: /mnt/mfshdd
volumes:
- /lizardfs/chunkserver:/mnt/mfshdd
deploy:
mode: global
cgiserver:
image: kadimasolutions/lizardfs:latest
command: cgiserver
networks:
- lizardfs
restart: on-failure
ports:
- 8080:80
deploy:
replicas: 0
docker-plugin:
image: kadimasolutions/docker-run-d:latest
volumes:
- /var/run/docker.sock:/var/run/docker.sock
command:
- "--restart=always -v /var/lib/docker/plugins/lizardfs/propagated-mount:/mnt/docker-volumes/:rshared -v /run/docker/plugins/lizardfs:/run/docker/plugins/ --net lizardfs_lizardfs --cap-add SYS_ADMIN --device=/dev/fuse:/dev/fuse --security-opt=apparmor:unconfined -e ROOT_VOLUME_NAME=lizardfs -e LOG_LEVEL=debug -e REMOTE_PATH=/docker/volumes -e LOCAL_PATH=/var/lib/docker/plugins/lizardfs/propagated-mount -e MOUNT_OPTIONS='-o big_writes -o cacheexpirationtime=500 -o readaheadmaxwindowsize=1024' kadimasolutions/lizardfs-volume-driver"
environment:
CONTAINER_NAME: lizardfs-plugin
deploy:
mode: global
lizardfs-client:
image: kadimasolutions/docker-run-d:latest
volumes:
- /var/run/docker.sock:/var/run/docker.sock
command:
- "--restart=always --net lizardfs_lizardfs --cap-add SYS_ADMIN --device=/dev/fuse:/dev/fuse --security-opt=apparmor:unconfined kadimasolutions/lizardfs client"
environment:
CONTAINER_NAME: lizardfs-client
deploy:
mode: global
networks:
lizardfs:
attachable: true
```
This will deploy the Docker plugin, the LizardFS chunkserver, and a LizardFS client container on *every* host in your cluster. If you have different goals, you may want to update the scheduling rules to match your particular use case.
The stack uses @kadimasolutions's LizardFS Docker image to create the LizardFS cluster. You can modify the environment variables for the mfsmaster, mfsmaster-shadow, and chunkserver containers to completely configure your LizardFS cluster. Documentation for the `kadimasolutions/lizardfs` Docker image can be found in the [git repo](https://github.com/kadimasolutions/docker_lizardfs).
### Things You Should Know
Here are some things that you should know about the setup.
#### Different Container Image
The new container for deploying the plugin is actually the same software as the Docker managed plugin, but it is under a different repo on DockerHub. The plugin that you install with `docker plugin install` is under the `kadimasolutions/lizardfs-volume-plugin` repository. The plugin that you run as a standard Docker container on Swarm is under the `kadimasolutions/lizardfs-volume-driver` repository ( these may or may not be the final names for either ). The only difference between the two is how they are installed; otherwise they are running the same code.
#### Swarm Service Privileges Workaround
There is a limitation imposed by the Docker daemon on Swarm services that prevents them from running with admin privileges on the host. This is an issue for the LizardFS plugin container because it needs to have the `SYS_ADMIN` capability along with the FUSE device. In order to work around this I created a very simple container ( `kadimasolutions/docker-run-d` ) that uses the Docker CLI to run a container that does have privileges. This container can be deployed as a Swarm service to allow you to run privileged Swarm containers. This is how the `docker-plugin` and `lizardfs-client` services are deployed in the attached YAML.
#### lizardfs-client Convenience Container
As a convenience, the stack will deploy a container named `lizardfs-client` on every host in your Swarm. This container mounts the root of the LizardFS filesystem to `/mnt/mfs` and provides the LizardFS CLI tools to allow you to manage your LizardFS filesystem. To access the tools, you can exec into the `lizardfs-client` container on any host in your cluster. For example:
$ docker exec -it lizardfs-client bash
root@containerid $ lizardfs setgoal 3 /mnt/mfs/docker/volumes
root@containerid $ exit
This removes the need to install any LizardFS tools on your hosts.
### Known Issues
#### Docker Restart Issue
> **Note:** This is only a concern when using the Swarm deployment. It is not a problem when using the Docker managed version of the plugin.
When the Docker daemon is started it checks to make sure that all of your LizardFS volumes exist and it tries to connect to the LizardFS Docker plugin. Because I am running the plugin in a Docker container, the Docker daemon cannot connect to the plugin as the daemon is still starting up and the plugin container has not been started yet. Unfortunately, Docker will spend about 15 seconds timing out for each lizardfs volume before it finishes starting up. This can push your Docker daemon startup time up by several minutes if you have a lot of LizardFS volumes. After it finishes timing out for each volume, the Docker daemon starts up and everything works as you would expect.
This doesn't cause any critical issues; it just takes longer to start Docker because of all of the timeouts. Another option that I've speculated about is to run two Docker daemons on each host in the cluster and create a dedicated Swarm cluster just for LizardFS. This would be more of a specialized setup, but I think it would still work. In the end I think that the method of deployment will depend on the individual user's needs. Eventually I may try to test and document more deployment methods.

go.mod (+13, -0)

@@ -0,0 +1,13 @@
module lizardfs-volume-plugin

go 1.14

require (
	github.com/Microsoft/go-winio v0.4.14 // indirect
	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-plugins-helpers v0.0.0-20200102110956-c9a8a2d92ccc
	github.com/ramr/go-reaper v0.2.0
	github.com/sirupsen/logrus v1.6.0
	golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 // indirect
)

go.sum (+34, -0)

@@ -0,0 +1,34 @@
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-plugins-helpers v0.0.0-20200102110956-c9a8a2d92ccc h1:/A+mPcpajLsWiX9gSnzdVKM/IzZoYiNqXHe83z50k2c=
github.com/docker/go-plugins-helpers v0.0.0-20200102110956-c9a8a2d92ccc/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/ramr/go-reaper v0.2.0 h1:hhGZ1SRZ9fJfSEf9e14hRB4O0MafRwHK5O33j70qTNI=
github.com/ramr/go-reaper v0.2.0/go.mod h1:DFg2AhfQCvkJwRKUfsycOSSZELGBA9gt46ne3SOecJM=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 h1:eDrdRpKgkcCqKZQwyZRyeFZgfqt37SL7Kv3tok06cKE=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

main.go (+344, -0)

@@ -0,0 +1,344 @@
package main

import (
	"context"
	"errors"
	"fmt"
	"github.com/docker/go-plugins-helpers/volume"
	"github.com/ramr/go-reaper"
	log "github.com/sirupsen/logrus"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"strconv"
	"strings"
	"syscall"
	"time"
)

const socketAddress = "/run/docker/plugins/lizardfs310.sock"
const containerVolumePath = "/mnt/docker-volumes"
const hostVolumePath = "/mnt/docker-volumes"
const volumeRoot = "/mnt/lizardfs/"

var host = os.Getenv("HOST")
var port = os.Getenv("PORT")
var remotePath = os.Getenv("REMOTE_PATH")
var mountOptions = os.Getenv("MOUNT_OPTIONS")
var rootVolumeName = os.Getenv("ROOT_VOLUME_NAME")
var connectTimeoutStr = os.Getenv("CONNECT_TIMEOUT")
var connectTimeout = 3000
var mounted = make(map[string][]string)

type lizardfsVolume struct {
	Name string
	Goal int
	Path string
}

type lizardfsDriver struct {
	volumes   map[string]*lizardfsVolume
	statePath string
}
func (l lizardfsDriver) Create(request *volume.CreateRequest) error {
	log.WithField("method", "create").Debugf("%#v", l)
	volumeName := request.Name
	volumePath := fmt.Sprintf("%s%s", volumeRoot, volumeName)
	replicationGoal := request.Options["ReplicationGoal"]
	if volumeName == rootVolumeName {
		log.Warning("tried to create a volume with same name as root volume. Ignoring request.")
	}
	errs := make(chan error, 1)
	go func() {
		// 0760: owner rwx, group rw (octal file mode)
		err := os.MkdirAll(volumePath, 0760)
		errs <- err
	}()
	select {
	case err := <-errs:
		if err != nil {
			return err
		}
	case <-time.After(time.Duration(connectTimeout) * time.Millisecond):
		return errors.New("create operation timeout")
	}
	_, err := strconv.Atoi(replicationGoal)
	if err == nil {
		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(connectTimeout)*time.Millisecond)
		defer cancel()
		cmd := exec.CommandContext(ctx, "lizardfs", "setgoal", "-r", replicationGoal, volumePath)
		cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true, Pgid: 1}
		err = cmd.Start()
		if err != nil {
			return err
		}
		err = cmd.Wait()
		if err != nil {
			log.Error(err)
		}
	}
	return nil
}
func (l lizardfsDriver) List() (*volume.ListResponse, error) {
	log.WithField("method", "list").Debugf("")
	volumes := make(chan []*volume.Volume, 1)
	errs := make(chan error, 1)
	go func() {
		var vols []*volume.Volume
		directories, err := ioutil.ReadDir(volumeRoot)
		if err != nil {
			errs <- err
		}
		for _, directory := range directories {
			if len(mounted[directory.Name()]) == 0 {
				vols = append(vols, &volume.Volume{Name: directory.Name()})
			} else {
				vols = append(vols, &volume.Volume{Name: directory.Name(), Mountpoint: path.Join(hostVolumePath, directory.Name())})
			}
		}
		if rootVolumeName != "" {
			if len(mounted[rootVolumeName]) == 0 {
				vols = append(vols, &volume.Volume{Name: rootVolumeName})
			} else {
				vols = append(vols, &volume.Volume{Name: rootVolumeName, Mountpoint: path.Join(hostVolumePath, rootVolumeName)})
			}
		}
		volumes <- vols
	}()
	select {
	case res := <-volumes:
		return &volume.ListResponse{Volumes: res}, nil
	case err := <-errs:
		return nil, err
	case <-time.After(time.Duration(connectTimeout) * time.Millisecond):
		return nil, errors.New("list operation timeout")
	}
}

func (l lizardfsDriver) Get(request *volume.GetRequest) (*volume.GetResponse, error) {
	log.WithField("method", "get").Debugf("")
	volumeName := request.Name
	volumePath := volumeRoot
	if volumeName != rootVolumeName {
		volumePath = fmt.Sprintf("%s%s", volumeRoot, volumeName)
	}
	errs := make(chan error, 1)
	go func() {
		if _, err := os.Stat(volumePath); os.IsNotExist(err) {
			errs <- err
		} else {
			errs <- nil
		}
	}()
	select {
	case err := <-errs:
		if err != nil {
			return nil, err
		} else {
			return &volume.GetResponse{Volume: &volume.Volume{Name: volumeName, Mountpoint: volumePath}}, nil
		}
	case <-time.After(time.Duration(connectTimeout) * time.Millisecond):
		return nil, errors.New("get operation timeout")
	}
}
func (l lizardfsDriver) Remove(request *volume.RemoveRequest) error {
	log.WithField("method", "remove").Debugf("")
	volumeName := request.Name
	volumePath := fmt.Sprintf("%s%s", volumeRoot, volumeName)
	if volumeName == rootVolumeName {
		return fmt.Errorf("can't remove root volume %s", rootVolumeName)
	}
	err := os.RemoveAll(volumePath)
	return err
}

func (l lizardfsDriver) Path(request *volume.PathRequest) (*volume.PathResponse, error) {
	log.WithField("method", "path").Debugf("")
	var volumeName = request.Name
	var hostMountpoint = path.Join(hostVolumePath, volumeName)
	if len(mounted[volumeName]) == 0 {
		return &volume.PathResponse{Mountpoint: hostMountpoint}, nil
	}
	return &volume.PathResponse{}, nil
}
func (l lizardfsDriver) Mount(request *volume.MountRequest) (*volume.MountResponse, error) {
log.WithField("method", "mount").Debugf("")
var volumeName = request.Name
var mountID = request.ID
var containerMountpoint = path.Join(containerVolumePath, volumeName)
var hostMountpoint = path.Join(hostVolumePath, volumeName)
if len(mounted[volumeName]) == 0 {
err := os.MkdirAll(containerMountpoint, 760)
if err != nil && err != os.ErrExist {
return nil, err
}
mountRemotePath := remotePath
if volumeName != rootVolumeName {
mountRemotePath = path.Join(remotePath, volumeName)
}
params := []string{ "-o", "mfsmaster="+host, "-o", "mfsport="+port, "-o", "mfssubfolder="+mountRemotePath}
if mountOptions != "" {
params = append(params, strings.Split(mountOptions, " ")...)
}
params = append(params, []string{containerMountpoint}...)
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(connectTimeout)*time.Millisecond)
defer cancel()
cmd := exec.CommandContext(ctx, "mfsmount", params...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true, Pgid: 1}
err = cmd.Start()
if err != nil {
return nil, err
}
err = cmd.Wait()
if err != nil {
log.Error(err)
}
mounted[volumeName] = append(mounted[volumeName], mountID)
return &volume.MountResponse{Mountpoint: hostMountpoint}, nil
} else {
return &volume.MountResponse{Mountpoint: hostMountpoint}, nil
}
}
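// indexOf returns the position of word in data, or -1 if it is not present.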
func indexOf(word string, data []string) int {
for k, v := range data {
if word == v {
return k
}
}
return -1
}
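// Unmount drops the given mount ID from the volume's bookkeeping and runs
// umount once no containers reference the volume any more.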
func (l lizardfsDriver) Unmount(request *volume.UnmountRequest) error {
log.WithField("method", "unmount").Debugf("")
var volumeName = request.Name
var mountID = request.ID
var containerMountpoint = path.Join(containerVolumePath, volumeName)
index := indexOf(mountID, mounted[volumeName])
if index > -1 {
mounted[volumeName] = append(mounted[volumeName][:index], mounted[volumeName][index+1:]...)
}
if len(mounted[volumeName]) == 0 {
output, err := exec.Command("umount", containerMountpoint).CombinedOutput()
if err != nil {
log.Error(string(output))
return err
}
log.Debug(string(output))
return nil
}
return nil
}
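// Capabilities advertises the driver as globally scoped: volumes live on the
// shared LizardFS filesystem and are therefore visible from every node.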
func (l lizardfsDriver) Capabilities() *volume.CapabilitiesResponse {
log.WithField("method", "capabilities").Debugf("")
return &volume.CapabilitiesResponse{Capabilities: volume.Capability{Scope: "global"}}
}
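// newLizardfsDriver builds an empty driver instance; the connection to the
// LizardFS master is established separately by initClient.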
func newLizardfsDriver(root string) (*lizardfsDriver, error) {
log.WithField("method", "new driver").Debug(root)
d := &lizardfsDriver{
volumes: map[string]*lizardfsVolume{},
}
return d, nil
}
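// initClient mounts the configured remote path from the LizardFS master onto
// the local volume root so the driver can manage volumes as plain directories.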
func initClient() {
log.WithField("host", host).WithField("port", port).WithField("remote path", remotePath).Info("initializing client")
err := os.MkdirAll(volumeRoot, 0760)
if err != nil {
log.Error(err)
}
params := []string{"-o", "mfsmaster="+host, "-o", "mfsport="+port, "-o", "mfssubfolder="+remotePath}
if mountOptions != "" {
params = append(params, strings.Split(mountOptions, " ")...)
}
params = append(params, []string{volumeRoot}...)
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(connectTimeout)*time.Millisecond)
defer cancel()
output, err := exec.CommandContext(ctx, "mfsmount", params...).CombinedOutput()
if err != nil {
log.Error(string(output))
log.Fatal(err)
}
log.Debug(string(output))
}
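// startReaperWorker re-executes the plugin binary as a worker child and keeps
// the original process around to reap orphaned child processes (see the
// go-reaper issue referenced below).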
func startReaperWorker() {
// See related issue in go-reaper https://github.com/ramr/go-reaper/issues/11
if _, hasReaper := os.LookupEnv("REAPER"); !hasReaper {
go reaper.Reap()
args := append(os.Args, "#worker")
pwd, err := os.Getwd()
if err != nil {
panic(err)
}
workerEnv := []string{fmt.Sprintf("REAPER=%d", os.Getpid())}
var wstatus syscall.WaitStatus
pattrs := &syscall.ProcAttr{
Dir: pwd,
Env: append(os.Environ(), workerEnv...),
Sys: &syscall.SysProcAttr{Setsid: true},
Files: []uintptr{0, 1, 2},
}
workerPid, _ := syscall.ForkExec(args[0], args, pattrs)
_, err = syscall.Wait4(workerPid, &wstatus, 0, nil)
for syscall.EINTR == err {
_, err = syscall.Wait4(workerPid, &wstatus, 0, nil)
}
}
}
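// main configures logging, splits into the reaper/worker pair, mounts the
// LizardFS root, and serves the volume plugin API on the plugin's unix socket.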
func main() {
logLevel, err := log.ParseLevel(os.Getenv("LOG_LEVEL"))
if err != nil {
log.SetLevel(log.InfoLevel)
} else {
log.SetLevel(logLevel)
}
log.Debugf("log level set to %s", log.GetLevel())
startReaperWorker()
parsedTimeout, err := strconv.Atoi(connectTimeoutStr)
if err != nil {
log.Errorf("failed to parse timeout with error %v. Assuming default %v", err, connectTimeout)
} else {
connectTimeout = parsedTimeout
}
initClient()
d, err := newLizardfsDriver("/mnt")
if err != nil {
log.Fatal(err)
}
h := volume.NewHandler(d)
log.Infof("listening on %s", socketAddress)
log.Error(h.ServeUnix(socketAddress, 0))
}

+ 4
- 0
run-tests.sh View File

@ -0,0 +1,4 @@
#!/usr/bin/env bash
docker run -it --rm --privileged \
-v "$(pwd)/plugin:/plugin" \
lizardfs-volume-plugin_test "$@"

+ 34
- 0
test/Dockerfile View File

@ -0,0 +1,34 @@
FROM docker:stable-dind
# Install dependencies
RUN apk add --no-cache bash curl jq python3 wget docker-compose go
# Create our working directory
RUN mkdir /project
# Switch to our working directory
WORKDIR /project
# Pull the LizardFS image used for creating the test environment
RUN wget https://raw.githubusercontent.com/moby/moby/master/contrib/download-frozen-image-v2.sh -O /download-image.sh
RUN chmod 744 /download-image.sh
RUN mkdir -p /images/lizardfs
RUN /download-image.sh /images/lizardfs kadimasolutions/lizardfs:latest
# Copy in the docker compose file that we will use to create test LizardFS
# clusters
COPY ./docker-compose.yml /project/
# Copy in the test scripts
COPY ./test-environment.sh /test-environment.sh
RUN chmod 744 /test-environment.sh
COPY ./test-run.sh /test-run.sh
RUN chmod 744 /test-run.sh
# Copy in our entrypoint script
COPY ./docker-entrypoint.sh /docker-entrypoint.sh
RUN chmod 744 /docker-entrypoint.sh
# Set the entrypoint
ENTRYPOINT ["/docker-entrypoint.sh"]

+ 31
- 0
test/docker-compose.yml View File

@ -0,0 +1,31 @@
version: '3'
services:
mfsmaster:
image: kadimasolutions/lizardfs
command: master
restart: on-failure
volumes:
- /var/lib/mfs
ports:
- ${MASTER_PORT}:9421
chunkserver:
image: kadimasolutions/lizardfs
command: chunkserver
restart: on-failure
environment:
# This lets you run the chunkserver with less available disk space
MFSCHUNKSERVER_HDD_LEAVE_SPACE_DEFAULT: 20Mi # 4Gi is the default
MFSHDD_1: /mnt/mfshdd
volumes:
- /mnt/mfshdd
client:
image: kadimasolutions/lizardfs
command: client /mnt/mfs
restart: on-failure
# Required permissions and devices for container to mount filesystem
cap_add:
- SYS_ADMIN
devices:
- /dev/fuse:/dev/fuse
security_opt:
- apparmor:unconfined

+ 11
- 0
test/docker-entrypoint.sh View File

@ -0,0 +1,11 @@
#!/bin/sh
image_tag=$1
log_prefix="[Root]"
echo "$log_prefix Creating Test Environment"
/test-environment.sh $image_tag
echo "$log_prefix Running Tests"
/test-run.sh

+ 27
- 0
test/test-environment.sh View File

@ -0,0 +1,27 @@
#!/bin/sh
image_tag=$1
log_prefix="[Plugin Environment]"
echo "$log_prefix Starting Docker"
dockerd-entrypoint.sh 2> /var/log/docker.log &
echo $! > /run/dockerd-entrypoint.pid
# Wait for Docker to startup
while ! docker ps > /dev/null 2>&1; do
sleep 1
done
echo "$log_prefix Docker finished startup"
echo "$log_prefix Loading baked LizardFS image"
tar -cC '/images/lizardfs' . | docker load
# Install plugin
if [ -z "$image_tag" ]; then
echo "$log_prefix Installing plugin from local dir"
docker plugin create lizardfs /plugin
else
echo "$log_prefix Installing Plugin from DockerHub: $image_tag"
docker plugin install --alias lizardfs --grant-all-permissions --disable $image_tag
fi

+ 247
- 0
test/test-run.sh View File

@ -0,0 +1,247 @@
#!/bin/sh
####
# Plugin Test Cases
####
log_prefix="[Plugin Test]"
# Start a LizardFS cluster for the plugin to connect to
# Set the LizardFS master port
echo "MASTER_PORT=9421" > .env
echo "$log_prefix Starting up local LizardFS cluster"
docker-compose down -v
docker-compose up -d
echo "$log_prefix Creating volume directory on LizardFS filesystem"
docker-compose exec client mkdir -p /mnt/mfs/docker/volumes
# Configure and enable plugin
echo "$log_prefix Configurin plugin to connect to 127.0.0.1:9421"
docker plugin disable lizardfs 2> /dev/null
docker plugin set lizardfs HOST=127.0.0.1 && \
docker plugin set lizardfs PORT=9421 && \
docker plugin set lizardfs REMOTE_PATH=/docker/volumes && \
docker plugin set lizardfs ROOT_VOLUME_NAME="" && \
docker plugin set lizardfs MOUNT_OPTIONS="" && \
docker plugin set lizardfs CONNECT_TIMEOUT=10000 && \
docker plugin set lizardfs LOG_LEVEL=info && \
docker plugin enable lizardfs
if [ $? -ne 0 ]; then echo "TEST FAILED"; exit 1; fi
# Create volumes and make sure that they exist
echo "$log_prefix Create volume: lizardfs-volume-1" && \
docker volume create --driver lizardfs lizardfs-volume-1 && \
\
echo "$log_prefix Make sure lizardfs-volume-1 exists in volume list" && \
docker volume ls | grep "lizardfs.*lizardfs-volume-1" && \
\
echo "$log_prefix Make sure lizardfs-volume-1 exists on LizardFS filesystem" && \
docker-compose exec client ls /mnt/mfs/docker/volumes | grep lizardfs-volume-1 && \
\
echo "$log_prefix Create a second volume: lizardfs-volume-2" && \
docker volume create --driver lizardfs lizardfs-volume-2 && \
\
echo "$log_prefix Make sure lizardfs-volume-2 exists" && \
docker volume ls | grep "lizardfs.*lizardfs-volume-2" && \
\
echo "$log_prefix Make sure lizardfs-volume-1 still exists" && \
docker volume ls | grep "lizardfs.*lizardfs-volume-1"
if [ $? -ne 0 ]; then echo "TEST FAILED"; exit 1; fi
# Store data in a volume and make sure that the data is persisted
echo "$log_prefix Create test data on lizardfs-volume-1" && \
docker run -it --rm -v lizardfs-volume-1:/data --entrypoint=bash \
kadimasolutions/lizardfs -c 'echo "Hello World" > /data/test-data.txt' && \
\
echo "$log_prefix Make sure data exists in volume" && \
docker run -it --rm -v lizardfs-volume-1:/data --entrypoint=cat \
kadimasolutions/lizardfs /data/test-data.txt | grep "Hello World" && \
\
echo "$log_prefix Make sure data exists on LizardFS filesystem" && \
docker-compose exec client cat \
/mnt/mfs/docker/volumes/lizardfs-volume-1/test-data.txt | grep "Hello World"
if [ $? -ne 0 ]; then echo "TEST FAILED"; exit 1; fi
# Mount a volume into multiple containers, then remove the containers,
# and remount
echo "$log_prefix Mount lizardfs-volume-1 into container1 and container2" && \
docker run -d --name container1 -it --rm -v lizardfs-volume-1:/data --entrypoint=bash \
kadimasolutions/lizardfs && \
\
docker run -d --name container2 -it --rm -v lizardfs-volume-1:/data --entrypoint=bash \
kadimasolutions/lizardfs && \
\
echo "$log_prefix Make sure data exists in container1" && \
docker exec -it container1 cat /data/test-data.txt | grep "Hello World" && \
\
echo "$log_prefix Make sure data exists in container2" && \
docker exec -it container2 cat /data/test-data.txt | grep "Hello World" && \
\
echo "$log_prefix Remove container1" && \
docker stop container1 && \
\
echo "$log_prefix Make sure data still exists in container2" && \
docker exec -it container2 cat /data/test-data.txt | grep "Hello World" && \
\
echo "$log_prefix Remove container2" && \
docker stop container2 && \
\
echo "$log_prefix Make sure lizardfs-volume-1 can still be mounted into a new container" && \
docker run -it --rm -v lizardfs-volume-1:/data --entrypoint=cat \
kadimasolutions/lizardfs /data/test-data.txt | grep "Hello World"
if [ $? -ne 0 ]; then echo "TEST FAILED"; exit 1; fi
# Create a volume with a specified replication goal and check that it is set
# when the volume is created
echo "$log_prefix Create lizardfs-volume-3 with a replication goal of '3'" && \
docker volume create --driver lizardfs lizardfs-volume-3 -o ReplicationGoal=3 && \
\
echo "$log_prefix Make sure that the volume has a replication goal of '3'" && \
docker-compose exec \
client lizardfs getgoal /mnt/mfs/docker/volumes/lizardfs-volume-3 | \
grep ".*lizardfs-volume-3: 3"
if [ $? -ne 0 ]; then echo "TEST FAILED"; exit 1; fi
# Bring down the cluster
echo "$log_prefix Bringing down LizardFS cluster" && \
echo "$log_prefix Remove volumes" && \
docker volume rm lizardfs-volume-1 && \
docker volume rm lizardfs-volume-2 && \
docker volume rm lizardfs-volume-3 && \
echo "$log_prefix Remove LizardFS cluster" && \
docker-compose down -v && \
echo "$log_prefix Disable plugin" && \
docker plugin disable -f lizardfs
if [ $? -ne 0 ]; then echo "TEST FAILED"; exit 1; fi
# Test connecting to cluster on a different port, storage directory, with mount
# options, and with the root volume name set
echo "MASTER_PORT=9900" > .env
echo "$log_prefix Creating cluster with master port 9900" && \
docker-compose up -d && \
\
echo "$log_prefix Creating storage directory, /alternate-volumes, on LizardFS filesystem" && \
docker-compose exec client mkdir -p /mnt/mfs/alternate-volumes && \
\
echo "$log_prefix Enabling plugin with PORT=9900, REMOTE_PATH=/alternate-volumes," && \
echo "$log_prefix MOUNT_OPTIONS='-o allow_other', and ROOT_VOLUME_NAME=lizardfs" && \
docker plugin set lizardfs PORT=9900 REMOTE_PATH=/alternate-volumes \
MOUNT_OPTIONS='-o allow_other' ROOT_VOLUME_NAME=lizardfs && \
docker plugin enable lizardfs && \
\
echo "$log_prefix Create volume 'volume-on-different-port' to test connection" && \
docker volume create --driver lizardfs volume-on-different-port && \
\
echo "$log_prefix Make sure volume-on-different-port exists in volume list" && \
docker volume ls | grep "lizardfs.*volume-on-different-port" && \
\
echo "$log_prefix Make sure that the mount options are getting set" && \
ps -ef | grep "allow_other" | grep -v "grep" && \
\
echo "$log_prefix Remove volume: volume-on-different-port" && \
docker volume rm volume-on-different-port
if [ $? -ne 0 ]; then echo "TEST FAILED"; exit 1; fi
# Run tests for the Root Volume
echo "$log_prefix Create volumes: liz-1, liz-2" && \
docker volume create --driver lizardfs liz-1 && \
docker volume create --driver lizardfs liz-2 && \
\
echo "$log_prefix Add test-files liz-1, liz-2" && \
docker run -it --rm -v liz-1:/data --entrypoint=touch \
kadimasolutions/lizardfs /data/liz-1.txt && \
docker run -it --rm -v liz-2:/data --entrypoint=touch \
kadimasolutions/lizardfs /data/liz-2.txt && \
\
echo "$log_prefix Mount Root Volume and make sure liz-1, liz-2, and their files are in it" && \
docker run -it --rm -v lizardfs:/lizardfs --entrypoint=ls \
kadimasolutions/lizardfs /lizardfs/liz-1 | grep "liz-1.txt" && \
docker run -it --rm -v lizardfs:/lizardfs --entrypoint=ls \
kadimasolutions/lizardfs /lizardfs/liz-2 | grep "liz-2.txt" && \
\
echo "$log_prefix Create a new directory, liz-3, in the Root Volume" && \
docker run -it --rm -v lizardfs:/lizardfs --entrypoint=mkdir \
kadimasolutions/lizardfs /lizardfs/liz-3 && \
\
echo "$log_prefix Make sure the new directory registers in the volume list" && \
docker volume ls | grep "lizardfs.*liz-3" && \
\
echo "$log_prefix Create a volume with the same name as the Root Volume" && \
docker run -it --rm -v lizardfs:/lizardfs --entrypoint=mkdir \
kadimasolutions/lizardfs /lizardfs/lizardfs && \
\
echo "$log_prefix Make sure that the Root Volume takes precedence when mounting" && \
docker run -it --rm -v lizardfs:/lizardfs --entrypoint=ls \
kadimasolutions/lizardfs /lizardfs/liz-1 | grep "liz-1.txt"
if [ $? -ne 0 ]; then echo "TEST FAILED"; exit 1; fi
echo "$log_prefix Make sure you can't delete the Root Volume" &&
docker volume rm lizardfs
if [ $? -eq 0 ]; then echo "TEST FAILED"; exit 1; fi
echo "$log_prefix Make sure all volumes still exist after attempting to delete the Root Volume"
docker volume ls | grep "lizardfs.*liz-1" && \
docker volume ls | grep "lizardfs.*liz-2" && \
docker volume ls | grep "lizardfs.*liz-3" && \
\
echo "$log_prefix Delete the volumes" && \
docker volume rm liz-1 && \
docker volume rm liz-2 && \
docker volume rm liz-3
if [ $? -ne 0 ]; then echo "TEST FAILED"; exit 1; fi
# Test setting the log level
plugin_id=$(docker plugin ls | grep lizardfs | awk '{print $1}')
echo "$log_prefix Test a 'docker volume ls'" && \
docker volume ls
if [ $? -ne 0 ]; then echo "TEST FAILED"; exit 1; fi
echo "$log_prefix Make sure plugin is not logging DEBUG messages"
cat /var/log/docker.log | grep $plugin_id | tail -n 1 | grep -i DEBUG
if [ $? -eq 0 ]; then echo "TEST FAILED"; exit 1; fi
echo "$log_prefix Set log level to 'DEBUG'" && \
docker plugin disable -f lizardfs && \
docker plugin set lizardfs LOG_LEVEL=DEBUG && \
docker plugin enable lizardfs && \
\
echo "$log_prefix Test a 'docker volume ls'" && \
docker volume ls && \
\
echo "$log_prefix Make Sure that the plugin does log a DEBUG message" && \
cat /var/log/docker.log | grep $plugin_id | tail -n 1 | grep -i DEBUG
if [ $? -ne 0 ]; then echo "TEST FAILED"; exit 1; fi
# Bring down the cluster
echo "$log_prefix Remove LizardFS cluster" && \
docker-compose down -v
if [ $? -ne 0 ]; then echo "TEST FAILED"; exit 1; fi
echo "$log_prefix ALL DONE. SUCCESS!"
