Commit 3ef231c3 authored by Christian Jürges's avatar Christian Jürges
Browse files

added flags

added simple acls (not complete yet)
parent b5b23c21
Pipeline #417 failed with stages
This diff is collapsed.
......@@ -29,7 +29,7 @@ func (c *command) Run(arg ...string) ([][]string, error) {
if os.Geteuid() == 0 {
cmd = exec.Command(c.Command, arg...)
} else {
args := append([]string{c.Command}, arg...)
args := append([]string{"/usr/local/bin/" + c.Command}, arg...)
cmd = exec.Command("sudo", args...)
}
......
package main
import "path"
// isAllowed reports whether the dataset path p matches one of the
// ACL glob patterns configured in opts.Acls. With no patterns
// configured, every path is denied.
func isAllowed(p string) bool {
	for _, pattern := range opts.Acls {
		// A malformed pattern yields an error from path.Match and is
		// treated the same as "no match".
		if matched, _ := path.Match(pattern, p); matched {
			return true
		}
	}
	return false
}
package main
import (
"bufio"
"fmt"
"net"
"os"
)
// NotifyStruct is the JSON payload broadcast to connected notify
// clients when a dataset event occurs (e.g. type "create" or
// "destroy" with the dataset path as content).
type NotifyStruct struct {
Type string `json:"type"`
Content string `json:"content"`
}
// messages carries broadcast payloads from the HTTP handlers to the
// notify server, which fans them out to every connected TCP client.
// It is initialized inside startNofifiyServer.
var messages chan string
// startNofifiyServer runs a TCP broadcast server on port 6000.
// Every string pushed into the package-level `messages` channel is
// fanned out to all currently connected clients, and lines received
// from any client are re-broadcast to the others.  The function never
// returns; run it in its own goroutine.
func startNofifiyServer() {
	// Number of people whom ever connected; used as the next client id.
	clientCount := 0

	// All people who are connected; a map wherein the keys are
	// net.Conn objects and the values are client "ids", an integer.
	allClients := make(map[net.Conn]int)

	// Channel into which the TCP server will push new connections.
	newConnections := make(chan net.Conn)

	// Channel into which we'll push dead connections for removal
	// from allClients.
	deadConnections := make(chan net.Conn)

	// Channel into which we'll push messages from connected clients
	// (and from the HTTP handlers) so that we can broadcast them to
	// every connection in allClients.
	messages = make(chan string)

	// Start the TCP server.
	server, err := net.Listen("tcp", ":6000")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// Tell the server to accept connections forever and push new
	// connections into the newConnections channel.
	go func() {
		for {
			conn, err := server.Accept()
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			newConnections <- conn
		}
	}()

	// Loop endlessly, handling 1) new connections; 2) dead
	// connections; and, 3) broadcast messages.
	for {
		select {

		// Accept new clients.
		case conn := <-newConnections:
			log.Infof("Accepted new client, #%d", clientCount)

			// Add this connection to the `allClients` map.
			allClients[conn] = clientCount
			clientCount++ // idiomatic increment (was `clientCount += 1`)

			// Constantly read incoming messages from this client in a
			// goroutine and push those onto the messages channel for
			// broadcast to others.
			go func(conn net.Conn, clientId int) {
				reader := bufio.NewReader(conn)
				for {
					incoming, err := reader.ReadString('\n')
					if err != nil {
						break
					}
					messages <- fmt.Sprintf("Client %d > %s", clientId, incoming)
				}

				// When we encounter `err` reading, send this
				// connection to `deadConnections` for removal.
				deadConnections <- conn
			}(conn, allClients[conn])

		// Accept messages from connected clients.
		case message := <-messages:
			// Loop over all connected clients (gofmt -s form: the
			// unused value is dropped from the range clause).
			for conn := range allClients {
				// Send them a message in a go-routine so that the
				// network operation doesn't block.
				go func(conn net.Conn, message string) {
					_, err := conn.Write([]byte(message))

					// If there was an error communicating with them,
					// the connection is dead.
					if err != nil {
						deadConnections <- conn
					}
				}(conn, message)
			}
			log.Infof("New message: %s", message)
			log.Infof("Broadcast to %d clients", len(allClients))

		// Remove dead clients.
		case conn := <-deadConnections:
			log.Infof("Client %d disconnected", allClients[conn])
			delete(allClients, conn)
		}
	}
}
# For a quick start check out our HTTP Requests collection (Tools|HTTP Client|Open HTTP Requests Collection).
#
# Following HTTP Request Live Templates are available:
# * 'gtrp' and 'gtr' create a GET request with or without query parameters;
# * 'ptr' and 'ptrp' create a POST request with a simple or parameter-like body;
# * 'mptr' and 'fptr' create a POST request to submit a form with a text or file field (multipart/form-data);
GET http://localhost:8000/api/storage/v1/pools
Accept: application/json
###
POST http://192.168.99.9:8000/api/storage/v1/pools/pool01/filesystems
Content-Type: application/json
{
"name": "ocnc/data_quota/arcus"
}
###
DELETE http://192.168.99.9:8000/api/storage/v1/pools/pool01/filesystems/ocnc/data_quota/arcus
###
GET http://192.168.99.9:8000/api/storage/v1/pools
Accept: application/json
###
###
POST http://192.168.99.9:8000/api/storage/v1/pools/pool01/filesystems
Content-Type: application/json
{
"name": "ocnc/data_quota/regen"
}
###
DELETE http://192.168.99.9:8000/api/storage/v1/pools/pool01/filesystems/ocnc/data_quota/regen
###
......@@ -8,8 +8,9 @@ import (
"fmt"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/jessevdk/go-flags"
"github.com/op/go-logging"
"io/ioutil"
"log"
"net/http"
"os"
"path"
......@@ -20,7 +21,38 @@ type GenericMessage struct {
Message string `json:"message"`
}
// log is the package-wide logger for the zfsrest service.
var log = logging.MustGetLogger("zfsrest")
// logFormat is the console log layout: colored timestamp, short
// function name, level, and a per-message id in hex.
var logFormat = logging.MustStringFormatter(
`%{color}%{time:15:04:05.000} %{shortfunc} ▶ %{level:.4s} %{id:03x}%{color:reset} %{message}`,
)
// struct of options parsed from the command line by go-flags.
// Acls holds glob patterns (see isAllowed) restricting which dataset
// paths may be created, destroyed, or have their quota changed.
var opts struct {
LogLevel string `long:"loglevel" description:"set log level" required:"false" default:"info"`
Acls []string `long:"allowedpath" short:"a" description:"list of allowed zfs dataset paths allowed for create, destroy, quota changes"`
}
func main() {
// parse flags
_, err := flags.Parse(&opts)
if err != nil {
os.Exit(1)
}
// set logger
logLevel, err := logging.LogLevel(opts.LogLevel)
if err != nil {
panic(err)
}
logBackend1 := logging.NewLogBackend(os.Stderr, "", 0)
backend1Formatter := logging.NewBackendFormatter(logBackend1, logFormat)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(logLevel, "")
logging.SetBackend(backend1Leveled)
router := mux.NewRouter()
// handle pools
......@@ -37,6 +69,8 @@ func main() {
// handle volumes
router.HandleFunc("/api/storage/v1/pools/{path}/volumes", volumes).Methods("GET")
go startNofifiyServer()
loggedRouter := handlers.LoggingHandler(os.Stdout, router)
log.Fatal(http.ListenAndServe(":8000", loggedRouter))
......@@ -47,6 +81,7 @@ func pools(w http.ResponseWriter, r *http.Request) {
out, err := zfs.ListZpools()
var jp jsonPools
if err != nil {
w.WriteHeader(http.StatusBadRequest)
s := fmt.Sprintf("%s", err)
m := GenericMessage{s}
json.NewEncoder(w).Encode(m)
......@@ -58,17 +93,21 @@ func pools(w http.ResponseWriter, r *http.Request) {
jp.Pools = append(jp.Pools, j)
}
json.NewEncoder(w).Encode(jp)
}
}
func pool(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
// w.Header().Set("Content-Type", "application/json")
vars := mux.Vars(r)
out, err := zfs.GetZpool(vars["path"])
if err != nil {
w.WriteHeader(http.StatusNotFound)
s := fmt.Sprintf("%s", err)
m := GenericMessage{s}
json.NewEncoder(w).Encode(m)
} else {
var jp jsonPool
jp.Pool.Name = out.Name
......@@ -87,9 +126,11 @@ func datasets(w http.ResponseWriter, r *http.Request) {
out, err := zfs.Datasets(vars["path"])
var ds jsonDatasets
if err != nil {
w.WriteHeader(http.StatusNotFound)
s := fmt.Sprintf("%s", err)
m := GenericMessage{s}
json.NewEncoder(w).Encode(m)
} else {
for _, d := range out {
var v jsonDatasetProps
......@@ -109,6 +150,7 @@ func datasets(w http.ResponseWriter, r *http.Request) {
ds.Volumes = append(ds.Volumes, v)
}
json.NewEncoder(w).Encode(ds)
w.WriteHeader(http.StatusOK)
}
}
......@@ -119,9 +161,11 @@ func volumes(w http.ResponseWriter, r *http.Request) {
out, err := zfs.Volumes(vars["path"])
var ds jsonDatasets
if err != nil {
w.WriteHeader(http.StatusNotFound)
s := fmt.Sprintf("%s", err)
m := GenericMessage{s}
json.NewEncoder(w).Encode(m)
} else {
for _, d := range out {
var v jsonDatasetProps
......@@ -152,9 +196,11 @@ func filesystems(w http.ResponseWriter, r *http.Request) {
out, err := zfs.Filesystems(vars["path"])
var ds jsonDatasets
if err != nil {
w.WriteHeader(http.StatusNotFound)
s := fmt.Sprintf("%s", err)
m := GenericMessage{s}
json.NewEncoder(w).Encode(m)
} else {
for _, d := range out {
var v jsonDatasetProps
......@@ -183,12 +229,11 @@ func getdataset(w http.ResponseWriter, r *http.Request) {
out, err := zfs.GetDataset(fmt.Sprintf("%s/%s", vars["path"], vars["dataset"]))
if err != nil {
w.WriteHeader(http.StatusBadRequest)
s := fmt.Sprintf("%s", err)
m := GenericMessage{s}
json.NewEncoder(w).Encode(m)
w.WriteHeader(http.StatusBadRequest)
} else {
var ds jsonDatasetProps
ds.Pool = vars["path"]
ds.Name = out.Name
......@@ -221,6 +266,7 @@ func createfilesystem(w http.ResponseWriter, r *http.Request) {
var cp createDatasetProps
err := json.Unmarshal(body, &cp)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
s := fmt.Sprintf("%s", err)
json.NewEncoder(w).Encode(s)
} else {
......@@ -229,40 +275,58 @@ func createfilesystem(w http.ResponseWriter, r *http.Request) {
if cp.Quota != "" {
properties["quota"] = cp.Quota
}
out, err := zfs.CreateFilesystem(fmt.Sprintf("%s/%s", vars["path"], cp.Name), properties)
if err != nil {
s := fmt.Sprintf("%s", err)
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(s)
} else {
var chownPath = path.Join("/", vars["path"], cp.Name)
err := chown(chownPath, cp.Owner)
var dsPath = fmt.Sprintf("%s/%s", vars["path"], cp.Name)
if isAllowed(dsPath) {
out, err := zfs.CreateFilesystem(dsPath, properties)
if err != nil {
s := fmt.Sprintf("%s", err)
w.WriteHeader(http.StatusBadRequest)
s := fmt.Sprintf("%s", err)
json.NewEncoder(w).Encode(s)
} else {
var ds jsonFileSystem
ds.Filesystem.Pool = vars["path"]
ds.Filesystem.Name = out.Name
ds.Filesystem.Origin = out.Origin
ds.Filesystem.Used = out.Used
ds.Filesystem.Logicalused = out.Logicalused
ds.Filesystem.Avail = out.Avail
ds.Filesystem.Compression = out.Compression
ds.Filesystem.Mountpoint = out.Mountpoint
ds.Filesystem.Quota = out.Quota
ds.Filesystem.Type = out.Type
ds.Filesystem.Referenced = out.Referenced
ds.Filesystem.Usedbydataset = out.Usedbydataset
ds.Filesystem.Written = out.Written
w.WriteHeader(http.StatusCreated)
json.NewEncoder(w).Encode(ds)
// chown if owner is set
if cp.Owner != "" {
var chownPath = path.Join("/", vars["path"], cp.Name)
err := chown(chownPath, cp.Owner)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
s := fmt.Sprintf("%s", err)
json.NewEncoder(w).Encode(s)
}
}
// return created dataset
if err == nil {
w.WriteHeader(http.StatusCreated)
var ds jsonFileSystem
ds.Filesystem.Pool = vars["path"]
ds.Filesystem.Name = out.Name
ds.Filesystem.Origin = out.Origin
ds.Filesystem.Used = out.Used
ds.Filesystem.Logicalused = out.Logicalused
ds.Filesystem.Avail = out.Avail
ds.Filesystem.Compression = out.Compression
ds.Filesystem.Mountpoint = out.Mountpoint
ds.Filesystem.Quota = out.Quota
ds.Filesystem.Type = out.Type
ds.Filesystem.Referenced = out.Referenced
ds.Filesystem.Usedbydataset = out.Usedbydataset
ds.Filesystem.Written = out.Written
json.NewEncoder(w).Encode(ds)
// create notify message
var nm NotifyStruct
nm.Type = "create"
nm.Content = dsPath
nmb, err := json.Marshal(nm)
if err != nil {
panic(err)
}
messages <- fmt.Sprintf("%s\n", nmb)
}
}
} else {
w.WriteHeader(http.StatusForbidden)
s := fmt.Sprintf("%s", "acl does not allow to create "+dsPath)
json.NewEncoder(w).Encode(s)
}
}
}
......@@ -277,66 +341,78 @@ func modifyfilesystem(w http.ResponseWriter, r *http.Request) {
m := GenericMessage{s}
json.NewEncoder(w).Encode(m)
} else {
var cp modifyDatasetProps
err := json.Unmarshal(body, &cp)
if err != nil {
s := fmt.Sprintf("%s", err)
json.NewEncoder(w).Encode(s)
} else {
out, err := zfs.GetDataset(fmt.Sprintf("%s/%s", vars["path"], vars["dataset"]))
var dsPath = fmt.Sprintf("%s/%s", vars["path"], vars["dataset"])
if isAllowed(dsPath) {
var cp modifyDatasetProps
err := json.Unmarshal(body, &cp)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
s := fmt.Sprintf("%s", err)
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(s)
} else {
err = nil
if cp.Quota != "" {
err = out.SetProperty("quota", cp.Quota)
}
if err == nil {
if cp.Name != "" {
if cp.Name != vars["dataset"] {
out, err = out.Rename(fmt.Sprintf("%s/%s", vars["path"], cp.Name), false, false)
}
}
}
// var dsPath = fmt.Sprintf("%s/%s", vars["path"], vars["dataset"])
out, err := zfs.GetDataset(dsPath)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
s := fmt.Sprintf("%s", err)
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(s)
} else {
w.WriteHeader(http.StatusAccepted)
err = nil
if cp.Quota != "" {
err = out.SetProperty("quota", cp.Quota)
}
if err == nil {
if cp.Name != "" {
if cp.Name != vars["dataset"] {
out, err = out.Rename(fmt.Sprintf("%s/%s", vars["path"], cp.Name), false, false)
}
}
}
if err != nil {
w.WriteHeader(http.StatusBadRequest)
s := fmt.Sprintf("%s", err)
json.NewEncoder(w).Encode(s)
} else {
w.WriteHeader(http.StatusAccepted)
}
}
}
} else {
w.WriteHeader(http.StatusForbidden)
s := fmt.Sprintf("%s", "access denied "+dsPath)
json.NewEncoder(w).Encode(s)
}
}
}
// deletefilesystem handles DELETE requests for a filesystem dataset.
// The dataset is destroyed only when the ACL permits it; on success a
// "destroy" notification is broadcast to the notify clients.
//
// Fixes: WriteHeader was called twice on the error paths (the second
// call is superfluous and ignored by net/http), and the ACL check used
// by the create/modify handlers was missing even though the
// --allowedpath flag help lists "destroy" as a protected operation.
func deletefilesystem(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	var dsPath = fmt.Sprintf("%s/%s", vars["path"], vars["dataset"])
	// Enforce the same ACL check as createfilesystem/modifyfilesystem.
	if !isAllowed(dsPath) {
		w.WriteHeader(http.StatusForbidden)
		s := fmt.Sprintf("%s", "access denied "+dsPath)
		json.NewEncoder(w).Encode(s)
		return
	}
	out, err := zfs.GetDataset(dsPath)
	if err != nil {
		// The status must be written exactly once, before the body.
		w.WriteHeader(http.StatusBadRequest)
		s := fmt.Sprintf("%s", err)
		json.NewEncoder(w).Encode(s)
		return
	}
	if err := out.Destroy(zfs.DestroyDefault); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		s := fmt.Sprintf("%s", err)
		json.NewEncoder(w).Encode(s)
		return
	}
	w.WriteHeader(http.StatusNoContent)
	// Broadcast the destroy event to all connected notify clients.
	var nm NotifyStruct
	nm.Type = "destroy"
	nm.Content = dsPath
	nmb, err := json.Marshal(nm)
	if err != nil {
		// Marshalling a plain struct should never fail; treat as a bug.
		panic(err)
	}
	messages <- fmt.Sprintf("%s\n", nmb)
}
No preview for this file type
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment