Update libkv dependency
This commit is contained in:
parent
cdab6b1796
commit
66e489addb
237 changed files with 62817 additions and 16116 deletions
|
@ -76,7 +76,7 @@ func NewDataStore(ctx context.Context, kvSource staert.KvSource, object Object,
|
||||||
|
|
||||||
func (d *Datastore) watchChanges() error {
|
func (d *Datastore) watchChanges() error {
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
kvCh, err := d.kv.Watch(d.lockKey, stopCh)
|
kvCh, err := d.kv.Watch(d.lockKey, stopCh, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -31,6 +31,16 @@ watch = true
|
||||||
#
|
#
|
||||||
prefix = "/traefik"
|
prefix = "/traefik"
|
||||||
|
|
||||||
|
# Force to use API V3 (otherwise still use API V2)
|
||||||
|
#
|
||||||
|
# Deprecated
|
||||||
|
#
|
||||||
|
# Optional
|
||||||
|
# Default: false
|
||||||
|
#
|
||||||
|
useAPIV3 = true
|
||||||
|
|
||||||
|
|
||||||
# Override default configuration template.
|
# Override default configuration template.
|
||||||
# For advanced users :)
|
# For advanced users :)
|
||||||
#
|
#
|
||||||
|
@ -59,3 +69,7 @@ prefix = "/traefik"
|
||||||
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).
|
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).
|
||||||
|
|
||||||
Please refer to the [Key Value storage structure](/user-guide/kv-config/#key-value-storage-structure) section to get documentation on Traefik KV structure.
|
Please refer to the [Key Value storage structure](/user-guide/kv-config/#key-value-storage-structure) section to get documentation on Traefik KV structure.
|
||||||
|
|
||||||
|
!!! note
|
||||||
|
The option `useAPIV3` allows using Etcd API V3 only if it's set to true.
|
||||||
|
This option is **deprecated** and API V2 won't be supported in the future.
|
||||||
|
|
|
@ -322,6 +322,10 @@ As a result, it may be possible for Træfik to read an intermediate configuratio
|
||||||
To solve this problem, Træfik supports a special key called `/traefik/alias`.
|
To solve this problem, Træfik supports a special key called `/traefik/alias`.
|
||||||
If set, Træfik use the value as an alternative key prefix.
|
If set, Træfik use the value as an alternative key prefix.
|
||||||
|
|
||||||
|
!!! note
|
||||||
|
The field `useAPIV3` allows using Etcd V3 API which should support updating multiple keys atomically with Etcd.
|
||||||
|
Etcd API V2 is deprecated and, in the future, Træfik will support API V3 by default.
|
||||||
|
|
||||||
Given the key structure below, Træfik will use the `http://172.17.0.2:80` as its only backend (frontend keys have been omitted for brevity).
|
Given the key structure below, Træfik will use the `http://172.17.0.2:80` as its only backend (frontend keys have been omitted for brevity).
|
||||||
|
|
||||||
| Key | Value |
|
| Key | Value |
|
||||||
|
|
210
examples/cluster/docker-compose.yml
Normal file
210
examples/cluster/docker-compose.yml
Normal file
|
@ -0,0 +1,210 @@
|
||||||
|
version: '2'
|
||||||
|
|
||||||
|
services:
|
||||||
|
|
||||||
|
## KV part ##
|
||||||
|
|
||||||
|
# CONSUL
|
||||||
|
|
||||||
|
consul:
|
||||||
|
image: progrium/consul
|
||||||
|
command: -server -bootstrap -log-level debug -ui-dir /ui
|
||||||
|
ports:
|
||||||
|
- "8400:8400"
|
||||||
|
- "8500:8500"
|
||||||
|
- "8600:53/udp"
|
||||||
|
expose:
|
||||||
|
- "8300"
|
||||||
|
- "8301"
|
||||||
|
- "8301/udp"
|
||||||
|
- "8302"
|
||||||
|
- "8302/udp"
|
||||||
|
networks:
|
||||||
|
net:
|
||||||
|
ipv4_address: 10.0.1.2
|
||||||
|
|
||||||
|
# ETCD V3
|
||||||
|
|
||||||
|
etcd3:
|
||||||
|
image: quay.io/coreos/etcd:v3.2.9
|
||||||
|
command: /usr/local/bin/etcd --data-dir=/etcd-data --name node1 --initial-advertise-peer-urls http://10.0.1.12:2380 --listen-peer-urls http://10.0.1.12:2380 --advertise-client-urls http://10.0.1.12:2379,http://10.0.1.12:4001 --listen-client-urls http://10.0.1.12:2379,http://10.0.1.12:4001 --initial-cluster node1=http://10.0.1.12:2380 --debug
|
||||||
|
ports:
|
||||||
|
- "4001:4001"
|
||||||
|
- "2380:2380"
|
||||||
|
- "2379:2379"
|
||||||
|
networks:
|
||||||
|
net:
|
||||||
|
ipv4_address: 10.0.1.12
|
||||||
|
|
||||||
|
etcdctl-ping:
|
||||||
|
image: tenstartups/etcdctl
|
||||||
|
command: --endpoints=[10.0.1.12:2379] get "traefik/acme/storagefile"
|
||||||
|
environment:
|
||||||
|
ETCDCTL_DIAL_: "TIMEOUT 10s"
|
||||||
|
ETCDCTL_API : "3"
|
||||||
|
networks:
|
||||||
|
- net
|
||||||
|
|
||||||
|
etcdctl-rm:
|
||||||
|
image: tenstartups/etcdctl
|
||||||
|
command: --endpoints=[10.0.1.12:2379] del "/traefik/acme/storagefile"
|
||||||
|
environment:
|
||||||
|
ETCDCTL_DIAL_: "TIMEOUT 10s"
|
||||||
|
ETCDCTL_API : "3"
|
||||||
|
networks:
|
||||||
|
- net
|
||||||
|
|
||||||
|
## BOULDER part ##
|
||||||
|
|
||||||
|
boulder:
|
||||||
|
image: containous/boulder:release
|
||||||
|
environment:
|
||||||
|
FAKE_DNS: 172.17.0.1
|
||||||
|
PKCS11_PROXY_SOCKET: tcp://boulder-hsm:5657
|
||||||
|
extra_hosts:
|
||||||
|
- le.wtf:127.0.0.1
|
||||||
|
- boulder:127.0.0.1
|
||||||
|
ports:
|
||||||
|
- 4000:4000 # ACME
|
||||||
|
- 4002:4002 # OCSP
|
||||||
|
- 4003:4003 # OCSP
|
||||||
|
- 4500:4500 # ct-test-srv
|
||||||
|
- 8000:8000 # debug ports
|
||||||
|
- 8001:8001
|
||||||
|
- 8002:8002
|
||||||
|
- 8003:8003
|
||||||
|
- 8004:8004
|
||||||
|
- 8055:8055 # dns-test-srv updates
|
||||||
|
- 9380:9380 # mail-test-srv
|
||||||
|
- 9381:9381 # mail-test-srv
|
||||||
|
restart: unless-stopped
|
||||||
|
depends_on:
|
||||||
|
- bhsm
|
||||||
|
- bmysql
|
||||||
|
- brabbitmq
|
||||||
|
networks:
|
||||||
|
net:
|
||||||
|
ipv4_address: 10.0.1.3
|
||||||
|
|
||||||
|
bhsm:
|
||||||
|
image: letsencrypt/boulder-tools:2016-11-02
|
||||||
|
hostname: boulder-hsm
|
||||||
|
environment:
|
||||||
|
PKCS11_DAEMON_SOCKET: tcp://0.0.0.0:5657
|
||||||
|
command: /usr/local/bin/pkcs11-daemon /usr/lib/softhsm/libsofthsm.so
|
||||||
|
expose:
|
||||||
|
- 5657
|
||||||
|
networks:
|
||||||
|
net:
|
||||||
|
ipv4_address: 10.0.1.4
|
||||||
|
aliases:
|
||||||
|
- boulder-hsm
|
||||||
|
bmysql:
|
||||||
|
image: mariadb:10.1
|
||||||
|
hostname: boulder-mysql
|
||||||
|
environment:
|
||||||
|
MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
|
||||||
|
networks:
|
||||||
|
net:
|
||||||
|
ipv4_address: 10.0.1.5
|
||||||
|
aliases:
|
||||||
|
- boulder-mysql
|
||||||
|
|
||||||
|
brabbitmq:
|
||||||
|
image: rabbitmq:3-alpine
|
||||||
|
hostname: boulder-rabbitmq
|
||||||
|
environment:
|
||||||
|
RABBITMQ_NODE_IP_ADDRESS: "0.0.0.0"
|
||||||
|
networks:
|
||||||
|
net:
|
||||||
|
ipv4_address: 10.0.1.6
|
||||||
|
aliases:
|
||||||
|
- boulder-rabbitmq
|
||||||
|
|
||||||
|
## TRAEFIK part ##
|
||||||
|
|
||||||
|
traefik-storeconfig:
|
||||||
|
build:
|
||||||
|
context: ../..
|
||||||
|
image: containous/traefik
|
||||||
|
volumes:
|
||||||
|
- "./traefik.toml:/traefik.toml:ro"
|
||||||
|
- "./acme.json:/acme.json:ro"
|
||||||
|
command: storeconfig --debug
|
||||||
|
networks:
|
||||||
|
- net
|
||||||
|
|
||||||
|
traefik01:
|
||||||
|
build:
|
||||||
|
context: ../..
|
||||||
|
image: containous/traefik
|
||||||
|
command: ${TRAEFIK_CMD}
|
||||||
|
extra_hosts:
|
||||||
|
- traefik.boulder.com:172.17.0.1
|
||||||
|
volumes:
|
||||||
|
- "/var/run/docker.sock:/var/run/docker.sock:ro"
|
||||||
|
expose:
|
||||||
|
- "443"
|
||||||
|
- "5001"
|
||||||
|
ports:
|
||||||
|
- "80:80"
|
||||||
|
- "8080:8080"
|
||||||
|
- "443:443"
|
||||||
|
- "5001:443" # Needed for SNI challenge
|
||||||
|
networks:
|
||||||
|
net:
|
||||||
|
ipv4_address: 10.0.1.8
|
||||||
|
|
||||||
|
traefik02:
|
||||||
|
build:
|
||||||
|
context: ../..
|
||||||
|
image: containous/traefik
|
||||||
|
command: ${TRAEFIK_CMD}
|
||||||
|
extra_hosts:
|
||||||
|
- traefik.boulder.com:172.17.0.1
|
||||||
|
volumes:
|
||||||
|
- "/var/run/docker.sock:/var/run/docker.sock:ro"
|
||||||
|
expose:
|
||||||
|
- "443"
|
||||||
|
- "5001"
|
||||||
|
ports:
|
||||||
|
- "88:80"
|
||||||
|
- "8888:8080"
|
||||||
|
- "8443:443"
|
||||||
|
depends_on:
|
||||||
|
- traefik01
|
||||||
|
networks:
|
||||||
|
net:
|
||||||
|
ipv4_address: 10.0.1.9
|
||||||
|
|
||||||
|
whoami01:
|
||||||
|
image: emilevauge/whoami
|
||||||
|
expose:
|
||||||
|
- "80"
|
||||||
|
labels:
|
||||||
|
- "traefik.port=80"
|
||||||
|
- "traefik.backend=wam01"
|
||||||
|
- "traefik.frontend.rule=Host:who01.localhost.com"
|
||||||
|
- "traefik.enable=true"
|
||||||
|
networks:
|
||||||
|
net:
|
||||||
|
ipv4_address: 10.0.1.10
|
||||||
|
|
||||||
|
whoami02:
|
||||||
|
image: emilevauge/whoami
|
||||||
|
expose:
|
||||||
|
- "80"
|
||||||
|
labels:
|
||||||
|
- "traefik.port=80"
|
||||||
|
- "traefik.backend=wam02"
|
||||||
|
- "traefik.frontend.rule=Host:who02.localhost.com"
|
||||||
|
- "traefik.enable=true"
|
||||||
|
networks:
|
||||||
|
- net
|
||||||
|
|
||||||
|
networks:
|
||||||
|
net:
|
||||||
|
driver: bridge
|
||||||
|
ipam:
|
||||||
|
config:
|
||||||
|
- subnet: 10.0.1.0/26
|
247
examples/cluster/manage_cluster_docker_environment.sh
Executable file
247
examples/cluster/manage_cluster_docker_environment.sh
Executable file
|
@ -0,0 +1,247 @@
|
||||||
|
#! /usr/bin/env bash
|
||||||
|
|
||||||
|
# Initialize variables
|
||||||
|
readonly basedir=$(dirname $0)
|
||||||
|
readonly doc_file=$basedir"/docker-compose.yml"
|
||||||
|
export COMPOSE_PROJECT_NAME="cluster"
|
||||||
|
|
||||||
|
# Stop and remove Docker environment
|
||||||
|
down_environment() {
|
||||||
|
echo "DOWN Docker environment"
|
||||||
|
! docker-compose -f $doc_file down -v &>/dev/null && \
|
||||||
|
echo "[ERROR] Unable to stop the Docker environment" && exit 11
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Create and start Docker-compose environment or subpart of its services (if services are listed)
|
||||||
|
# $@ : List of services to start (optional)
|
||||||
|
up_environment() {
|
||||||
|
echo "START Docker environment "$@
|
||||||
|
! docker-compose -f $doc_file up -d $@ &>/dev/null && \
|
||||||
|
echo "[ERROR] Unable to start Docker environment ${@}" && exit 21
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Stop and remove Docker environment
|
||||||
|
delete_services() {
|
||||||
|
echo "DELETE services "$@
|
||||||
|
! docker-compose -f $doc_file stop $@ &>/dev/null && \
|
||||||
|
echo "[ERROR] Unable to stop services "$@ && exit 31
|
||||||
|
! docker-compose -f $doc_file rm -vf $@ &>/dev/null && \
|
||||||
|
echo "[ERROR] Unable to delete services "$@ && exit 31
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Init the environment : get IP address and create needed files
|
||||||
|
init_acme_json() {
|
||||||
|
echo "CREATE empty acme.json file"
|
||||||
|
rm -f $basedir/acme.json && \
|
||||||
|
touch $basedir/acme.json && \
|
||||||
|
echo "{}" > $basedir/acme.json && \
|
||||||
|
chmod 600 $basedir/acme.json # Needed for ACME
|
||||||
|
}
|
||||||
|
|
||||||
|
start_consul() {
|
||||||
|
up_environment consul
|
||||||
|
waiting_counter=12
|
||||||
|
# Not start Traefik store config if consul is not started
|
||||||
|
echo "WAIT for consul..."
|
||||||
|
sleep 5
|
||||||
|
while [[ -z $(curl -s http://10.0.1.2:8500/v1/status/leader) ]]; do
|
||||||
|
sleep 5
|
||||||
|
let waiting_counter-=1
|
||||||
|
if [[ $waiting_counter -eq 0 ]]; then
|
||||||
|
echo "[ERROR] Unable to start consul container in the allowed time, the Docker environment will be stopped"
|
||||||
|
down_environment
|
||||||
|
exit 41
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
start_etcd3() {
|
||||||
|
up_environment etcd3
|
||||||
|
waiting_counter=12
|
||||||
|
# Not start Traefik store config if consul is not started
|
||||||
|
echo "WAIT for ETCD3..."
|
||||||
|
while [[ -z $(curl -s --connect-timeout 2 http://10.0.1.12:2379/version) ]]; do
|
||||||
|
sleep 5
|
||||||
|
let waiting_counter-=1
|
||||||
|
if [[ $waiting_counter -eq 0 ]]; then
|
||||||
|
echo "[ERROR] Unable to start etcd3 container in the allowed time, the Docker environment will be stopped"
|
||||||
|
down_environment
|
||||||
|
exit 51
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
start_storeconfig_consul() {
|
||||||
|
init_acme_json
|
||||||
|
# Create traefik.toml with consul provider
|
||||||
|
cp $basedir/traefik.toml.tmpl $basedir/traefik.toml
|
||||||
|
echo '
|
||||||
|
[consul]
|
||||||
|
endpoint = "10.0.1.2:8500"
|
||||||
|
watch = true
|
||||||
|
prefix = "traefik"' >> $basedir/traefik.toml
|
||||||
|
up_environment traefik-storeconfig
|
||||||
|
rm -f $basedir/traefik.toml && rm -f $basedir/acme.json
|
||||||
|
# Delete acme-storage-file key
|
||||||
|
waiting_counter=5
|
||||||
|
# Not start Traefik store config if consul is not started
|
||||||
|
echo "Delete storage file key..."
|
||||||
|
while [[ -z $(curl -s http://10.0.1.2:8500/v1/kv/traefik/acme/storagefile) && $waiting_counter -gt 0 ]]; do
|
||||||
|
sleep 5
|
||||||
|
let waiting_counter-=1
|
||||||
|
done
|
||||||
|
if [[ $waiting_counter -eq 0 ]]; then
|
||||||
|
echo "[WARN] Unable to get storagefile key in consul"
|
||||||
|
else
|
||||||
|
curl -s --request DELETE http://10.0.1.2:8500/v1/kv/traefik/acme/storagefile
|
||||||
|
ret=$1
|
||||||
|
if [[ $ret -ne 0 ]]; then
|
||||||
|
echo "[ERROR] Unable to delete storagefile key from consul kv."
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
start_storeconfig_etcd3() {
|
||||||
|
init_acme_json
|
||||||
|
# Create traefik.toml with consul provider
|
||||||
|
cp $basedir/traefik.toml.tmpl $basedir/traefik.toml
|
||||||
|
echo '
|
||||||
|
[etcd]
|
||||||
|
endpoint = "10.0.1.12:2379"
|
||||||
|
watch = true
|
||||||
|
prefix = "/traefik"
|
||||||
|
useAPIV3 = true' >> $basedir/traefik.toml
|
||||||
|
up_environment traefik-storeconfig
|
||||||
|
rm -f $basedir/traefik.toml && rm -f $basedir/acme.json
|
||||||
|
# Delete acme-storage-file key
|
||||||
|
waiting_counter=5
|
||||||
|
# Not start Traefik store config if consul is not started
|
||||||
|
echo "Delete storage file key..."
|
||||||
|
while [[ $(docker-compose -f $doc_file up --exit-code-from etcdctl-ping etcdctl-ping &>/dev/null) -ne 0 && $waiting_counter -gt 0 ]]; do
|
||||||
|
sleep 5
|
||||||
|
let waiting_counter-=1
|
||||||
|
done
|
||||||
|
# Not start Traefik store config if consul is not started
|
||||||
|
echo "Delete storage file key from ETCD3..."
|
||||||
|
|
||||||
|
up_environment etcdctl-rm && \
|
||||||
|
delete_services etcdctl-rm traefik-storeconfig etcdctl-ping
|
||||||
|
}
|
||||||
|
|
||||||
|
start_traefik() {
|
||||||
|
up_environment traefik01
|
||||||
|
# Waiting for the first instance which is mapped to the host as leader before to start the second one
|
||||||
|
waiting_counter=5
|
||||||
|
echo "WAIT for traefik leader..."
|
||||||
|
sleep 10
|
||||||
|
while [[ -z $(curl -s --connect-timeout 3 http://10.0.1.8:8080/ping) ]]; do
|
||||||
|
sleep 2
|
||||||
|
let waiting_counter-=1
|
||||||
|
if [[ $waiting_counter -eq 0 ]]; then
|
||||||
|
echo "[ERROR] Unable to start Traefik leader container in the allowed time, the Docker environment will be stopped"
|
||||||
|
down_environment
|
||||||
|
exit 51
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
up_environment whoami01
|
||||||
|
waiting_counter=5
|
||||||
|
echo "WAIT for whoami..."
|
||||||
|
sleep 10
|
||||||
|
while [[ -z $(curl -s --connect-timeout 3 http://10.0.1.10) ]]; do
|
||||||
|
sleep 2
|
||||||
|
let waiting_counter-=1
|
||||||
|
if [[ $waiting_counter -eq 0 ]]; then
|
||||||
|
echo "[ERROR] Unable to start whoami container in the allowed time, the Docker environment will be stopped"
|
||||||
|
down_environment
|
||||||
|
exit 52
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
up_environment traefik02 whoami02
|
||||||
|
}
|
||||||
|
|
||||||
|
# Start boulder services
|
||||||
|
start_boulder() {
|
||||||
|
echo "Start boulder environment"
|
||||||
|
up_environment bmysql brabbitmq bhsm boulder
|
||||||
|
waiting_counter=12
|
||||||
|
# Not start Traefik if boulder is not started
|
||||||
|
echo "WAIT for boulder..."
|
||||||
|
while [[ -z $(curl -s http://10.0.1.3:4000/directory) ]]; do
|
||||||
|
sleep 5
|
||||||
|
let waiting_counter-=1
|
||||||
|
if [[ $waiting_counter -eq 0 ]]; then
|
||||||
|
echo "[ERROR] Unable to start boulder container in the allowed time, the Docker environment will be stopped"
|
||||||
|
down_environment
|
||||||
|
exit 61
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
echo "Boulder started."
|
||||||
|
}
|
||||||
|
|
||||||
|
# Script usage
|
||||||
|
show_usage() {
|
||||||
|
echo
|
||||||
|
echo "USAGE : manage_cluster_docker_environment.sh [--start [--consul|--etcd3]|--stop|--restart [--consul|--etcd3]]"
|
||||||
|
echo
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main method
|
||||||
|
# $@ All parameters given
|
||||||
|
main() {
|
||||||
|
|
||||||
|
[[ $# -lt 1 && $# -gt 2 ]] && show_usage && exit 1
|
||||||
|
|
||||||
|
case $1 in
|
||||||
|
"--start")
|
||||||
|
[[ $# -ne 2 ]] && show_usage && exit 2
|
||||||
|
# The domains who01.localhost.com and who02.localhost.com have to refer 127.0.0.1
|
||||||
|
# I, the /etc/hosts file
|
||||||
|
for whoami_idx in "01" "02"; do
|
||||||
|
[[ -z $(cat /etc/hosts | grep "127.0.0.1" | grep -vE "^#" | grep "who${whoami_idx}.localhost.com") ]] && \
|
||||||
|
echo "[ERROR] Domain who${whoami_idx}.localhost.com has to refer to 127.0.0.1 into /etc/hosts file." && \
|
||||||
|
exit 3
|
||||||
|
done
|
||||||
|
case $2 in
|
||||||
|
"--etcd3")
|
||||||
|
echo "USE ETCD V3 AS KV STORE"
|
||||||
|
export TRAEFIK_CMD="--etcd --etcd.endpoint=10.0.1.12:2379 --etcd.useAPIV3=true"
|
||||||
|
start_boulder && \
|
||||||
|
start_etcd3 && \
|
||||||
|
start_storeconfig_etcd3 && \
|
||||||
|
start_traefik
|
||||||
|
;;
|
||||||
|
"--consul")
|
||||||
|
echo "USE CONSUL AS KV STORE"
|
||||||
|
export TRAEFIK_CMD="--consul --consul.endpoint=10.0.1.2:8500"
|
||||||
|
start_boulder && \
|
||||||
|
start_consul && \
|
||||||
|
start_storeconfig_consul && \
|
||||||
|
start_traefik
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
show_usage && exit 4
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
echo "ENVIRONMENT SUCCESSFULLY STARTED"
|
||||||
|
;;
|
||||||
|
"--stop")
|
||||||
|
! down_environment
|
||||||
|
echo "ENVIRONMENT SUCCESSFULLY STOPPED"
|
||||||
|
;;
|
||||||
|
"--restart")
|
||||||
|
[[ $# -ne 2 ]] && show_usage && exit 5
|
||||||
|
down_environment
|
||||||
|
main --start $2
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
show_usage && exit 6
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
main $@
|
28
examples/cluster/traefik.toml.tmpl
Normal file
28
examples/cluster/traefik.toml.tmpl
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
logLevel = "DEBUG"
|
||||||
|
|
||||||
|
defaultEntryPoints = ["http", "https"]
|
||||||
|
|
||||||
|
[entryPoints]
|
||||||
|
[entryPoints.http]
|
||||||
|
address = ":80"
|
||||||
|
[entryPoints.https]
|
||||||
|
address = ":443"
|
||||||
|
[entryPoints.https.tls]
|
||||||
|
|
||||||
|
[acme]
|
||||||
|
email = "test@traefik.io"
|
||||||
|
storage = "traefik/acme/account"
|
||||||
|
storageFile = "/acme.json"
|
||||||
|
entryPoint = "https"
|
||||||
|
OnHostRule = true
|
||||||
|
caServer = "http://traefik.boulder.com:4000/directory"
|
||||||
|
|
||||||
|
|
||||||
|
[web]
|
||||||
|
address = ":8080"
|
||||||
|
|
||||||
|
[docker]
|
||||||
|
endpoint = "unix:///var/run/docker.sock"
|
||||||
|
domain = "localhost.com"
|
||||||
|
watch = true
|
||||||
|
exposedbydefault = false
|
|
@ -1,25 +1,96 @@
|
||||||
#!/bin/sh
|
#! /usr/bin/env bash
|
||||||
|
|
||||||
# backend 1
|
#
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="NetworkErrorRatio() > 0.5" http://localhost:2379/v2/keys/traefik/backends/backend1/circuitbreaker/expression
|
# Insert data in ETCD V3
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="http://172.17.0.2:80" http://localhost:2379/v2/keys/traefik/backends/backend1/servers/server1/url
|
function insert_etcd2_data() {
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="10" http://localhost:2379/v2/keys/traefik/backends/backend1/servers/server1/weight
|
# backend 1
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="http://172.17.0.3:80" http://localhost:2379/v2/keys/traefik/backends/backend1/servers/server2/url
|
curl -i -H "Accept: application/json" -X PUT -d value="NetworkErrorRatio() > 0.5" http://localhost:2379/v2/keys/traefik/backends/backend1/circuitbreaker/expression
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="1" http://localhost:2379/v2/keys/traefik/backends/backend1/servers/server2/weight
|
curl -i -H "Accept: application/json" -X PUT -d value="http://172.17.0.2:80" http://localhost:2379/v2/keys/traefik/backends/backend1/servers/server1/url
|
||||||
|
curl -i -H "Accept: application/json" -X PUT -d value="10" http://localhost:2379/v2/keys/traefik/backends/backend1/servers/server1/weight
|
||||||
|
curl -i -H "Accept: application/json" -X PUT -d value="http://172.17.0.3:80" http://localhost:2379/v2/keys/traefik/backends/backend1/servers/server2/url
|
||||||
|
curl -i -H "Accept: application/json" -X PUT -d value="1" http://localhost:2379/v2/keys/traefik/backends/backend1/servers/server2/weight
|
||||||
|
|
||||||
# backend 2
|
# backend 2
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="drr" http://localhost:2379/v2/keys/traefik/backends/backend2/loadbalancer/method
|
curl -i -H "Accept: application/json" -X PUT -d value="drr" http://localhost:2379/v2/keys/traefik/backends/backend2/loadbalancer/method
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="http://172.17.0.4:80" http://localhost:2379/v2/keys/traefik/backends/backend2/servers/server1/url
|
curl -i -H "Accept: application/json" -X PUT -d value="http://172.17.0.4:80" http://localhost:2379/v2/keys/traefik/backends/backend2/servers/server1/url
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="1" http://localhost:2379/v2/keys/traefik/backends/backend2/servers/server1/weight
|
curl -i -H "Accept: application/json" -X PUT -d value="1" http://localhost:2379/v2/keys/traefik/backends/backend2/servers/server1/weight
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="http://172.17.0.5:80" http://localhost:2379/v2/keys/traefik/backends/backend2/servers/server2/url
|
curl -i -H "Accept: application/json" -X PUT -d value="http://172.17.0.5:80" http://localhost:2379/v2/keys/traefik/backends/backend2/servers/server2/url
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="2" http://localhost:2379/v2/keys/traefik/backends/backend2/servers/server2/weight
|
curl -i -H "Accept: application/json" -X PUT -d value="2" http://localhost:2379/v2/keys/traefik/backends/backend2/servers/server2/weight
|
||||||
|
|
||||||
# frontend 1
|
# frontend 1
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="backend2" http://localhost:2379/v2/keys/traefik/frontends/frontend1/backend
|
curl -i -H "Accept: application/json" -X PUT -d value="backend2" http://localhost:2379/v2/keys/traefik/frontends/frontend1/backend
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="http" http://localhost:2379/v2/keys/traefik/frontends/frontend1/entrypoints
|
curl -i -H "Accept: application/json" -X PUT -d value="http" http://localhost:2379/v2/keys/traefik/frontends/frontend1/entrypoints
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="Host:test.localhost" http://localhost:2379/v2/keys/traefik/frontends/frontend1/routes/test_1/rule
|
curl -i -H "Accept: application/json" -X PUT -d value="Host:test.localhost" http://localhost:2379/v2/keys/traefik/frontends/frontend1/routes/test_1/rule
|
||||||
|
|
||||||
# frontend 2
|
# frontend 2
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="backend1" http://localhost:2379/v2/keys/traefik/frontends/frontend2/backend
|
curl -i -H "Accept: application/json" -X PUT -d value="backend1" http://localhost:2379/v2/keys/traefik/frontends/frontend2/backend
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="http" http://localhost:2379/v2/keys/traefik/frontends/frontend2/entrypoints
|
curl -i -H "Accept: application/json" -X PUT -d value="http" http://localhost:2379/v2/keys/traefik/frontends/frontend2/entrypoints
|
||||||
curl -i -H "Accept: application/json" -X PUT -d value="Path:/test" http://localhost:2379/v2/keys/traefik/frontends/frontend2/routes/test_2/rule
|
curl -i -H "Accept: application/json" -X PUT -d value="Path:/test" http://localhost:2379/v2/keys/traefik/frontends/frontend2/routes/test_2/rule
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
#
|
||||||
|
# Insert data in ETCD V3
|
||||||
|
# $1 = ECTD IP address
|
||||||
|
# Note : This function allows adding data in a ETCD V3 which is directly installed on a host
|
||||||
|
# or in container which binds its port 2379 on a host in the way to allows etcd_client container to access it.
|
||||||
|
function insert_etcd3_data() {
|
||||||
|
|
||||||
|
readonly etcd_ip=$1
|
||||||
|
# backend 1
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/backends/backend1/circuitbreaker/expression" "NetworkErrorRatio() > 0.5"
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/backends/backend1/servers/server1/url" "http://172.17.0.2:80"
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/backends/backend1/servers/server1/weight" "10"
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/backends/backend1/servers/server2/url" "http://172.17.0.3:80"
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/backends/backend1/servers/server2/weight" "1"
|
||||||
|
|
||||||
|
# backend 2
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/backends/backend2/loadbalancer/method" "drr"
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/backends/backend2/servers/server1/url" "http://172.17.0.4:80"
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/backends/backend2/servers/server1/weight" "1"
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/backends/backend2/servers/server2/url" "http://172.17.0.5:80"
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/backends/backend2/servers/server2/weight" "2"
|
||||||
|
|
||||||
|
# frontend 1
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/frontends/frontend1/backend" "backend2"
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik//frontends/frontend1/entrypoints" "http"
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/frontends/frontend1/routes/test_1/rule" "Host:test.localhost"
|
||||||
|
|
||||||
|
# frontend 2
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/frontends/frontend2/backend" "backend1"
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/frontends/frontend2/entrypoints" "http"
|
||||||
|
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/frontends/frontend2/routes/test_2/rule" "Path:/test"
|
||||||
|
}
|
||||||
|
|
||||||
|
function show_usage() {
|
||||||
|
echo "USAGE : etcd-config.sh ETCD_API_VERSION [ETCD_IP_ADDRESS]"
|
||||||
|
echo " ETCD_API_VERSION : Values v2 or V3 (v3 requires ETCD_IP_ADDRESS)"
|
||||||
|
echo " ETCD_IP_ADDRESS : Host ETCD IP address (not 127.0.0.1)"
|
||||||
|
}
|
||||||
|
|
||||||
|
function main() {
|
||||||
|
case $# in
|
||||||
|
1)
|
||||||
|
if [[ $1 == "V2" ]]; then
|
||||||
|
insert_etcd2_data
|
||||||
|
else
|
||||||
|
show_usage
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
2)
|
||||||
|
if [[ $1 == "V3" && $2 != "127.0.0.1" && ! -z $(echo $2 | grep -oE "([0-9]+(\.)?){4}") ]]; then
|
||||||
|
insert_etcd3_data $2
|
||||||
|
else
|
||||||
|
show_usage
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
show_usage
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
main $@
|
49
glide.lock
generated
49
glide.lock
generated
|
@ -1,5 +1,5 @@
|
||||||
hash: 6e35766dd261f1eb0c2dfaadb8e0bf235eb4f3f942de776f9b48faf8a868a2d0
|
hash: 2a604d8b74e8659df7db72d063432fa0822fdee6c81bc6657efa3c1bf0d9cd8a
|
||||||
updated: 2017-11-15T18:39:20.364720581+02:00
|
updated: 2017-11-17T14:21:55.148450413+01:00
|
||||||
imports:
|
imports:
|
||||||
- name: cloud.google.com/go
|
- name: cloud.google.com/go
|
||||||
version: 2e6a95edb1071d750f6d7db777bf66cd2997af6c
|
version: 2e6a95edb1071d750f6d7db777bf66cd2997af6c
|
||||||
|
@ -92,13 +92,27 @@ imports:
|
||||||
- name: github.com/containous/mux
|
- name: github.com/containous/mux
|
||||||
version: 06ccd3e75091eb659b1d720cda0e16bc7057954c
|
version: 06ccd3e75091eb659b1d720cda0e16bc7057954c
|
||||||
- name: github.com/containous/staert
|
- name: github.com/containous/staert
|
||||||
version: 1e26a71803e428fd933f5f9c8e50a26878f53147
|
version: af517d5b70db9c4b0505e0144fcc62b054057d2a
|
||||||
|
- name: github.com/coreos/bbolt
|
||||||
|
version: 3c6cbfb299c11444eb2f8c9d48f0d2ce09157423
|
||||||
- name: github.com/coreos/etcd
|
- name: github.com/coreos/etcd
|
||||||
version: c400d05d0aa73e21e431c16145e558d624098018
|
version: f1d7dd87da3e8feab4aaf675b8e29c6a5ed5f58b
|
||||||
subpackages:
|
subpackages:
|
||||||
|
- auth/authpb
|
||||||
- client
|
- client
|
||||||
|
- clientv3
|
||||||
|
- clientv3/concurrency
|
||||||
|
- etcdserver/api/v3rpc/rpctypes
|
||||||
|
- etcdserver/etcdserverpb
|
||||||
|
- mvcc/mvccpb
|
||||||
- pkg/pathutil
|
- pkg/pathutil
|
||||||
|
- pkg/srv
|
||||||
- pkg/types
|
- pkg/types
|
||||||
|
- version
|
||||||
|
- name: github.com/coreos/go-semver
|
||||||
|
version: 8ab6407b697782a06568d4b7f1db25550ec2e4c6
|
||||||
|
subpackages:
|
||||||
|
- semver
|
||||||
- name: github.com/coreos/go-oidc
|
- name: github.com/coreos/go-oidc
|
||||||
version: 5644a2f50e2d2d5ba0b474bc5bc55fea1925936d
|
version: 5644a2f50e2d2d5ba0b474bc5bc55fea1925936d
|
||||||
subpackages:
|
subpackages:
|
||||||
|
@ -202,14 +216,20 @@ imports:
|
||||||
- name: github.com/docker/go-units
|
- name: github.com/docker/go-units
|
||||||
version: 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
|
version: 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
|
||||||
- name: github.com/docker/leadership
|
- name: github.com/docker/leadership
|
||||||
version: 0a913e2d71a12fd14a028452435cb71ac8d82cb6
|
version: af20da7d3e62be9259835e93261acf931b5adecf
|
||||||
|
repo: https://github.com/containous/leadership.git
|
||||||
|
vcs: git
|
||||||
- name: github.com/docker/libkv
|
- name: github.com/docker/libkv
|
||||||
version: 93ab0e6c056d325dfbb11e1d58a3b4f5f62e7f3c
|
version: 5e4bb288a9a74320bb03f5c18d6bdbab0d8049de
|
||||||
|
repo: https://github.com/abronan/libkv.git
|
||||||
|
vcs: git
|
||||||
subpackages:
|
subpackages:
|
||||||
- store
|
- store
|
||||||
- store/boltdb
|
- store/boltdb
|
||||||
- store/consul
|
- store/consul
|
||||||
- store/etcd
|
- store/etcd
|
||||||
|
- store/etcd/v2
|
||||||
|
- store/etcd/v3
|
||||||
- store/zookeeper
|
- store/zookeeper
|
||||||
- name: github.com/docker/libtrust
|
- name: github.com/docker/libtrust
|
||||||
version: 9cbd2a1374f46905c68a4eb3694a130610adc62a
|
version: 9cbd2a1374f46905c68a4eb3694a130610adc62a
|
||||||
|
@ -278,9 +298,11 @@ imports:
|
||||||
- name: github.com/golang/glog
|
- name: github.com/golang/glog
|
||||||
version: 44145f04b68cf362d9c4df2182967c2275eaefed
|
version: 44145f04b68cf362d9c4df2182967c2275eaefed
|
||||||
- name: github.com/golang/protobuf
|
- name: github.com/golang/protobuf
|
||||||
version: 2bba0603135d7d7f5cb73b2125beeda19c09f4ef
|
version: 4bd1920723d7b7c925de087aa32e2187708897f7
|
||||||
subpackages:
|
subpackages:
|
||||||
|
- jsonpb
|
||||||
- proto
|
- proto
|
||||||
|
- ptypes/any
|
||||||
- name: github.com/google/go-github
|
- name: github.com/google/go-github
|
||||||
version: fe7d11f8add400587b6718d9f39a62e42cb04c28
|
version: fe7d11f8add400587b6718d9f39a62e42cb04c28
|
||||||
subpackages:
|
subpackages:
|
||||||
|
@ -552,7 +574,7 @@ imports:
|
||||||
- pbkdf2
|
- pbkdf2
|
||||||
- scrypt
|
- scrypt
|
||||||
- name: golang.org/x/net
|
- name: golang.org/x/net
|
||||||
version: 242b6b35177ec3909636b6cf6a47e8c2c6324b5d
|
version: c8c74377599bd978aee1cf3b9b63a8634051cec2
|
||||||
subpackages:
|
subpackages:
|
||||||
- context
|
- context
|
||||||
- context/ctxhttp
|
- context/ctxhttp
|
||||||
|
@ -578,9 +600,10 @@ imports:
|
||||||
- unix
|
- unix
|
||||||
- windows
|
- windows
|
||||||
- name: golang.org/x/text
|
- name: golang.org/x/text
|
||||||
version: 2910a502d2bf9e43193af9d68ca516529614eed3
|
version: 4ee4af566555f5fbe026368b75596286a312663a
|
||||||
subpackages:
|
subpackages:
|
||||||
- cases
|
- cases
|
||||||
|
- internal
|
||||||
- internal/tag
|
- internal/tag
|
||||||
- language
|
- language
|
||||||
- runes
|
- runes
|
||||||
|
@ -613,11 +636,16 @@ imports:
|
||||||
- internal/remote_api
|
- internal/remote_api
|
||||||
- internal/urlfetch
|
- internal/urlfetch
|
||||||
- urlfetch
|
- urlfetch
|
||||||
|
- name: google.golang.org/genproto
|
||||||
|
version: 09f6ed296fc66555a25fe4ce95173148778dfa85
|
||||||
|
subpackages:
|
||||||
|
- googleapis/rpc/status
|
||||||
- name: google.golang.org/grpc
|
- name: google.golang.org/grpc
|
||||||
version: cdee119ee21e61eef7093a41ba148fa83585e143
|
version: b8669c35455183da6d5c474ea6e72fbf55183274
|
||||||
subpackages:
|
subpackages:
|
||||||
- codes
|
- codes
|
||||||
- credentials
|
- credentials
|
||||||
|
- grpclb/grpc_lb_v1
|
||||||
- grpclog
|
- grpclog
|
||||||
- internal
|
- internal
|
||||||
- keepalive
|
- keepalive
|
||||||
|
@ -625,6 +653,7 @@ imports:
|
||||||
- naming
|
- naming
|
||||||
- peer
|
- peer
|
||||||
- stats
|
- stats
|
||||||
|
- status
|
||||||
- tap
|
- tap
|
||||||
- transport
|
- transport
|
||||||
- name: gopkg.in/fsnotify.v1
|
- name: gopkg.in/fsnotify.v1
|
||||||
|
|
13
glide.yaml
13
glide.yaml
|
@ -26,7 +26,7 @@ import:
|
||||||
- package: github.com/urfave/negroni
|
- package: github.com/urfave/negroni
|
||||||
version: 490e6a555d47ca891a89a150d0c1ef3922dfffe9
|
version: 490e6a555d47ca891a89a150d0c1ef3922dfffe9
|
||||||
- package: github.com/containous/staert
|
- package: github.com/containous/staert
|
||||||
version: 1e26a71803e428fd933f5f9c8e50a26878f53147
|
version: ^v2.0.0
|
||||||
- package: github.com/docker/docker
|
- package: github.com/docker/docker
|
||||||
version: 75c7536d2e2e328b644bf69153de879d1d197988
|
version: 75c7536d2e2e328b644bf69153de879d1d197988
|
||||||
- package: github.com/docker/go-connections
|
- package: github.com/docker/go-connections
|
||||||
|
@ -36,12 +36,17 @@ import:
|
||||||
- tlsconfig
|
- tlsconfig
|
||||||
- package: github.com/docker/go-units
|
- package: github.com/docker/go-units
|
||||||
version: 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
|
version: 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
|
||||||
|
- package: github.com/coreos/etcd
|
||||||
|
version: v3.2.9
|
||||||
- package: github.com/docker/libkv
|
- package: github.com/docker/libkv
|
||||||
|
repo: https://github.com/abronan/libkv.git
|
||||||
|
vcs: git
|
||||||
subpackages:
|
subpackages:
|
||||||
- store
|
- store
|
||||||
- store/boltdb
|
- store/boltdb
|
||||||
- store/consul
|
- store/consul
|
||||||
- store/etcd
|
- store/etcd/v2
|
||||||
|
- store/etcd/v3
|
||||||
- store/zookeeper
|
- store/zookeeper
|
||||||
- package: github.com/elazarl/go-bindata-assetfs
|
- package: github.com/elazarl/go-bindata-assetfs
|
||||||
- package: github.com/containous/mux
|
- package: github.com/containous/mux
|
||||||
|
@ -83,6 +88,8 @@ import:
|
||||||
repo: https://github.com/containous/gziphandler.git
|
repo: https://github.com/containous/gziphandler.git
|
||||||
vcs: git
|
vcs: git
|
||||||
- package: github.com/docker/leadership
|
- package: github.com/docker/leadership
|
||||||
|
repo: https://github.com/containous/leadership.git
|
||||||
|
vcs: git
|
||||||
- package: github.com/satori/go.uuid
|
- package: github.com/satori/go.uuid
|
||||||
version: ^1.1.0
|
version: ^1.1.0
|
||||||
- package: k8s.io/client-go
|
- package: k8s.io/client-go
|
||||||
|
@ -187,7 +194,7 @@ import:
|
||||||
- package: github.com/googleapis/gax-go
|
- package: github.com/googleapis/gax-go
|
||||||
version: 9af46dd5a1713e8b5cd71106287eba3cefdde50b
|
version: 9af46dd5a1713e8b5cd71106287eba3cefdde50b
|
||||||
- package: google.golang.org/grpc
|
- package: google.golang.org/grpc
|
||||||
version: v1.2.0
|
version: v1.5.2
|
||||||
- package: github.com/unrolled/secure
|
- package: github.com/unrolled/secure
|
||||||
version: 824e85271811af89640ea25620c67f6c2eed987e
|
version: 824e85271811af89640ea25620c67f6c2eed987e
|
||||||
- package: github.com/Nvveen/Gotty
|
- package: github.com/Nvveen/Gotty
|
||||||
|
|
|
@ -346,7 +346,7 @@ func (s *ConsulSuite) TestCommandStoreConfig(c *check.C) {
|
||||||
for key, value := range checkmap {
|
for key, value := range checkmap {
|
||||||
var p *store.KVPair
|
var p *store.KVPair
|
||||||
err = try.Do(60*time.Second, func() error {
|
err = try.Do(60*time.Second, func() error {
|
||||||
p, err = s.kv.Get(key)
|
p, err = s.kv.Get(key, nil)
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
428
integration/etcd3_test.go
Normal file
428
integration/etcd3_test.go
Normal file
|
@ -0,0 +1,428 @@
|
||||||
|
package integration
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/containous/traefik/integration/try"
|
||||||
|
"github.com/docker/libkv"
|
||||||
|
"github.com/docker/libkv/store"
|
||||||
|
"github.com/docker/libkv/store/etcd/v3"
|
||||||
|
"github.com/go-check/check"
|
||||||
|
|
||||||
|
checker "github.com/vdemeester/shakers"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Services IP addresses fixed in the configuration
|
||||||
|
ipEtcd string = "172.18.0.2"
|
||||||
|
ipWhoami01 string = "172.18.0.3"
|
||||||
|
ipWhoami02 string = "172.18.0.4"
|
||||||
|
ipWhoami03 string = "172.18.0.5"
|
||||||
|
ipWhoami04 string = "172.18.0.6"
|
||||||
|
|
||||||
|
traefikEtcdURL string = "http://127.0.0.1:8000/"
|
||||||
|
traefikWebEtcdURL string = "http://127.0.0.1:8081/"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Etcd test suites (using libcompose)
|
||||||
|
type Etcd3Suite struct {
|
||||||
|
BaseSuite
|
||||||
|
kv store.Store
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Etcd3Suite) SetUpTest(c *check.C) {
|
||||||
|
s.createComposeProject(c, "etcd3")
|
||||||
|
s.composeProject.Start(c)
|
||||||
|
|
||||||
|
etcdv3.Register()
|
||||||
|
url := ipEtcd + ":2379"
|
||||||
|
kv, err := libkv.NewStore(
|
||||||
|
store.ETCDV3,
|
||||||
|
[]string{url},
|
||||||
|
&store.Config{
|
||||||
|
ConnectionTimeout: 30 * time.Second,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
c.Fatal("Cannot create store etcd")
|
||||||
|
}
|
||||||
|
s.kv = kv
|
||||||
|
|
||||||
|
// wait for etcd
|
||||||
|
err = try.Do(60*time.Second, func() error {
|
||||||
|
_, err := kv.Exists("test", nil)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Etcd3Suite) TearDownTest(c *check.C) {
|
||||||
|
// shutdown and delete compose project
|
||||||
|
if s.composeProject != nil {
|
||||||
|
s.composeProject.Stop(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Etcd3Suite) TearDownSuite(c *check.C) {}
|
||||||
|
|
||||||
|
func (s *Etcd3Suite) TestSimpleConfiguration(c *check.C) {
|
||||||
|
file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct {
|
||||||
|
EtcdHost string
|
||||||
|
UseAPIV3 bool
|
||||||
|
}{
|
||||||
|
ipEtcd,
|
||||||
|
true,
|
||||||
|
})
|
||||||
|
defer os.Remove(file)
|
||||||
|
|
||||||
|
cmd, display := s.traefikCmd(withConfigFile(file))
|
||||||
|
defer display(c)
|
||||||
|
err := cmd.Start()
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
defer cmd.Process.Kill()
|
||||||
|
|
||||||
|
// TODO validate : run on 80
|
||||||
|
// Expected a 404 as we did not configure anything
|
||||||
|
err = try.GetRequest(traefikEtcdURL, 1*time.Second, try.StatusCodeIs(http.StatusNotFound))
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Etcd3Suite) TestNominalConfiguration(c *check.C) {
|
||||||
|
file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct {
|
||||||
|
EtcdHost string
|
||||||
|
UseAPIV3 bool
|
||||||
|
}{
|
||||||
|
ipEtcd,
|
||||||
|
true,
|
||||||
|
})
|
||||||
|
defer os.Remove(file)
|
||||||
|
|
||||||
|
cmd, display := s.traefikCmd(withConfigFile(file))
|
||||||
|
defer display(c)
|
||||||
|
err := cmd.Start()
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
defer cmd.Process.Kill()
|
||||||
|
|
||||||
|
backend1 := map[string]string{
|
||||||
|
"/traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5",
|
||||||
|
"/traefik/backends/backend1/servers/server1/url": "http://" + ipWhoami01 + ":80",
|
||||||
|
"/traefik/backends/backend1/servers/server1/weight": "10",
|
||||||
|
"/traefik/backends/backend1/servers/server2/url": "http://" + ipWhoami02 + ":80",
|
||||||
|
"/traefik/backends/backend1/servers/server2/weight": "1",
|
||||||
|
}
|
||||||
|
backend2 := map[string]string{
|
||||||
|
"/traefik/backends/backend2/loadbalancer/method": "drr",
|
||||||
|
"/traefik/backends/backend2/servers/server1/url": "http://" + ipWhoami03 + ":80",
|
||||||
|
"/traefik/backends/backend2/servers/server1/weight": "1",
|
||||||
|
"/traefik/backends/backend2/servers/server2/url": "http://" + ipWhoami04 + ":80",
|
||||||
|
"/traefik/backends/backend2/servers/server2/weight": "2",
|
||||||
|
}
|
||||||
|
frontend1 := map[string]string{
|
||||||
|
"/traefik/frontends/frontend1/backend": "backend2",
|
||||||
|
"/traefik/frontends/frontend1/entrypoints": "http",
|
||||||
|
"/traefik/frontends/frontend1/priority": "1",
|
||||||
|
"/traefik/frontends/frontend1/routes/test_1/rule": "Host:test.localhost",
|
||||||
|
}
|
||||||
|
frontend2 := map[string]string{
|
||||||
|
"/traefik/frontends/frontend2/backend": "backend1",
|
||||||
|
"/traefik/frontends/frontend2/entrypoints": "http",
|
||||||
|
"/traefik/frontends/frontend2/priority": "10",
|
||||||
|
"/traefik/frontends/frontend2/routes/test_2/rule": "Path:/test",
|
||||||
|
}
|
||||||
|
for key, value := range backend1 {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
for key, value := range backend2 {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
for key, value := range frontend1 {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
for key, value := range frontend2 {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for etcd
|
||||||
|
err = try.Do(60*time.Second, func() error {
|
||||||
|
_, err := s.kv.Exists("/traefik/frontends/frontend2/routes/test_2/rule", nil)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
// wait for traefik
|
||||||
|
err = try.GetRequest(traefikWebEtcdURL+"api/providers", 60*time.Second, try.BodyContains("Path:/test"))
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
client := &http.Client{}
|
||||||
|
req, err := http.NewRequest(http.MethodGet, traefikEtcdURL, nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
req.Host = "test.localhost"
|
||||||
|
response, err := client.Do(req)
|
||||||
|
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
c.Assert(response.StatusCode, checker.Equals, http.StatusOK)
|
||||||
|
|
||||||
|
body, err := ioutil.ReadAll(response.Body)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
if !strings.Contains(string(body), ipWhoami03) &&
|
||||||
|
!strings.Contains(string(body), ipWhoami04) {
|
||||||
|
c.Fail()
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodGet, traefikEtcdURL+"test", nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
response, err = client.Do(req)
|
||||||
|
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
c.Assert(response.StatusCode, checker.Equals, http.StatusOK)
|
||||||
|
|
||||||
|
body, err = ioutil.ReadAll(response.Body)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
if !strings.Contains(string(body), ipWhoami01) &&
|
||||||
|
!strings.Contains(string(body), ipWhoami02) {
|
||||||
|
c.Fail()
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodGet, traefikEtcdURL+"test2", nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
req.Host = "test2.localhost"
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound)
|
||||||
|
|
||||||
|
resp, err = http.Get(traefikEtcdURL)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Etcd3Suite) TestGlobalConfiguration(c *check.C) {
|
||||||
|
err := s.kv.Put("/traefik/entrypoints/http/address", []byte(":8001"), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
// wait for etcd
|
||||||
|
err = try.Do(60*time.Second, func() error {
|
||||||
|
_, err := s.kv.Exists("/traefik/entrypoints/http/address", nil)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
// start traefik
|
||||||
|
cmd, display := s.traefikCmd(
|
||||||
|
withConfigFile("fixtures/simple_web.toml"),
|
||||||
|
"--etcd",
|
||||||
|
"--etcd.endpoint="+ipEtcd+":4001",
|
||||||
|
"--etcd.useAPIV3=true")
|
||||||
|
defer display(c)
|
||||||
|
err = cmd.Start()
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
defer cmd.Process.Kill()
|
||||||
|
|
||||||
|
backend1 := map[string]string{
|
||||||
|
"/traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5",
|
||||||
|
"/traefik/backends/backend1/servers/server1/url": "http://" + ipWhoami01 + ":80",
|
||||||
|
"/traefik/backends/backend1/servers/server1/weight": "10",
|
||||||
|
"/traefik/backends/backend1/servers/server2/url": "http://" + ipWhoami02 + ":80",
|
||||||
|
"/traefik/backends/backend1/servers/server2/weight": "1",
|
||||||
|
}
|
||||||
|
backend2 := map[string]string{
|
||||||
|
"/traefik/backends/backend2/loadbalancer/method": "drr",
|
||||||
|
"/traefik/backends/backend2/servers/server1/url": "http://" + ipWhoami03 + ":80",
|
||||||
|
"/traefik/backends/backend2/servers/server1/weight": "1",
|
||||||
|
"/traefik/backends/backend2/servers/server2/url": "http://" + ipWhoami04 + ":80",
|
||||||
|
"/traefik/backends/backend2/servers/server2/weight": "2",
|
||||||
|
}
|
||||||
|
frontend1 := map[string]string{
|
||||||
|
"/traefik/frontends/frontend1/backend": "backend2",
|
||||||
|
"/traefik/frontends/frontend1/entrypoints": "http",
|
||||||
|
"/traefik/frontends/frontend1/priority": "1",
|
||||||
|
"/traefik/frontends/frontend1/routes/test_1/rule": "Host:test.localhost",
|
||||||
|
}
|
||||||
|
frontend2 := map[string]string{
|
||||||
|
"/traefik/frontends/frontend2/backend": "backend1",
|
||||||
|
"/traefik/frontends/frontend2/entrypoints": "http",
|
||||||
|
"/traefik/frontends/frontend2/priority": "10",
|
||||||
|
"/traefik/frontends/frontend2/routes/test_2/rule": "Path:/test",
|
||||||
|
}
|
||||||
|
for key, value := range backend1 {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
for key, value := range backend2 {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
for key, value := range frontend1 {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
for key, value := range frontend2 {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for etcd
|
||||||
|
err = try.Do(60*time.Second, func() error {
|
||||||
|
_, err := s.kv.Exists("/traefik/frontends/frontend2/routes/test_2/rule", nil)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
// wait for traefik
|
||||||
|
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("Path:/test"))
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
//check
|
||||||
|
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8001/", nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
req.Host = "test.localhost"
|
||||||
|
|
||||||
|
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Etcd3Suite) TestCertificatesContentstWithSNIConfigHandshake(c *check.C) {
|
||||||
|
// start traefik
|
||||||
|
cmd, display := s.traefikCmd(
|
||||||
|
withConfigFile("fixtures/simple_web.toml"),
|
||||||
|
"--etcd",
|
||||||
|
"--etcd.endpoint="+ipEtcd+":4001",
|
||||||
|
"--etcd.useAPIV3=true")
|
||||||
|
defer display(c)
|
||||||
|
|
||||||
|
//Copy the contents of the certificate files into ETCD
|
||||||
|
snitestComCert, err := ioutil.ReadFile("fixtures/https/snitest.com.cert")
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
snitestComKey, err := ioutil.ReadFile("fixtures/https/snitest.com.key")
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
snitestOrgCert, err := ioutil.ReadFile("fixtures/https/snitest.org.cert")
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
snitestOrgKey, err := ioutil.ReadFile("fixtures/https/snitest.org.key")
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
globalConfig := map[string]string{
|
||||||
|
"/traefik/entrypoints/https/address": ":4443",
|
||||||
|
"/traefik/entrypoints/https/tls/certificates/0/certfile": string(snitestComCert),
|
||||||
|
"/traefik/entrypoints/https/tls/certificates/0/keyfile": string(snitestComKey),
|
||||||
|
"/traefik/entrypoints/https/tls/certificates/1/certfile": string(snitestOrgCert),
|
||||||
|
"/traefik/entrypoints/https/tls/certificates/1/keyfile": string(snitestOrgKey),
|
||||||
|
"/traefik/defaultentrypoints/0": "https",
|
||||||
|
}
|
||||||
|
|
||||||
|
backend1 := map[string]string{
|
||||||
|
"/traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5",
|
||||||
|
"/traefik/backends/backend1/servers/server1/url": "http://" + ipWhoami01 + ":80",
|
||||||
|
"/traefik/backends/backend1/servers/server1/weight": "10",
|
||||||
|
"/traefik/backends/backend1/servers/server2/url": "http://" + ipWhoami02 + ":80",
|
||||||
|
"/traefik/backends/backend1/servers/server2/weight": "1",
|
||||||
|
}
|
||||||
|
backend2 := map[string]string{
|
||||||
|
"/traefik/backends/backend2/loadbalancer/method": "drr",
|
||||||
|
"/traefik/backends/backend2/servers/server1/url": "http://" + ipWhoami03 + ":80",
|
||||||
|
"/traefik/backends/backend2/servers/server1/weight": "1",
|
||||||
|
"/traefik/backends/backend2/servers/server2/url": "http://" + ipWhoami04 + ":80",
|
||||||
|
"/traefik/backends/backend2/servers/server2/weight": "2",
|
||||||
|
}
|
||||||
|
frontend1 := map[string]string{
|
||||||
|
"/traefik/frontends/frontend1/backend": "backend2",
|
||||||
|
"/traefik/frontends/frontend1/entrypoints": "http",
|
||||||
|
"/traefik/frontends/frontend1/priority": "1",
|
||||||
|
"/traefik/frontends/frontend1/routes/test_1/rule": "Host:snitest.com",
|
||||||
|
}
|
||||||
|
frontend2 := map[string]string{
|
||||||
|
"/traefik/frontends/frontend2/backend": "backend1",
|
||||||
|
"/traefik/frontends/frontend2/entrypoints": "http",
|
||||||
|
"/traefik/frontends/frontend2/priority": "10",
|
||||||
|
"/traefik/frontends/frontend2/routes/test_2/rule": "Host:snitest.org",
|
||||||
|
}
|
||||||
|
for key, value := range globalConfig {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
for key, value := range backend1 {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
for key, value := range backend2 {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
for key, value := range frontend1 {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
for key, value := range frontend2 {
|
||||||
|
err := s.kv.Put(key, []byte(value), nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for etcd
|
||||||
|
err = try.Do(60*time.Second, try.KVExists(s.kv, "/traefik/frontends/frontend1/backend"))
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
err = cmd.Start()
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
defer cmd.Process.Kill()
|
||||||
|
|
||||||
|
// wait for traefik
|
||||||
|
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("Host:snitest.org"))
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
//check
|
||||||
|
tlsConfig := &tls.Config{
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
ServerName: "snitest.com",
|
||||||
|
}
|
||||||
|
conn, err := tls.Dial("tcp", "127.0.0.1:4443", tlsConfig)
|
||||||
|
c.Assert(err, checker.IsNil, check.Commentf("failed to connect to server"))
|
||||||
|
|
||||||
|
defer conn.Close()
|
||||||
|
err = conn.Handshake()
|
||||||
|
c.Assert(err, checker.IsNil, check.Commentf("TLS handshake error"))
|
||||||
|
|
||||||
|
cs := conn.ConnectionState()
|
||||||
|
err = cs.PeerCertificates[0].VerifyHostname("snitest.com")
|
||||||
|
c.Assert(err, checker.IsNil, check.Commentf("certificate did not match SNI servername"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Etcd3Suite) TestCommandStoreConfig(c *check.C) {
|
||||||
|
cmd, display := s.traefikCmd(
|
||||||
|
"storeconfig",
|
||||||
|
withConfigFile("fixtures/simple_web.toml"),
|
||||||
|
"--etcd.endpoint="+ipEtcd+":4001",
|
||||||
|
"--etcd.useAPIV3=true")
|
||||||
|
defer display(c)
|
||||||
|
err := cmd.Start()
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
// wait for traefik finish without error
|
||||||
|
cmd.Wait()
|
||||||
|
|
||||||
|
//CHECK
|
||||||
|
checkmap := map[string]string{
|
||||||
|
"/traefik/loglevel": "DEBUG",
|
||||||
|
"/traefik/defaultentrypoints/0": "http",
|
||||||
|
"/traefik/entrypoints/http/address": ":8000",
|
||||||
|
"/traefik/web/address": ":8080",
|
||||||
|
"/traefik/etcd/endpoint": ipEtcd + ":4001",
|
||||||
|
}
|
||||||
|
|
||||||
|
for key, value := range checkmap {
|
||||||
|
var p *store.KVPair
|
||||||
|
err = try.Do(60*time.Second, func() error {
|
||||||
|
p, err = s.kv.Get(key, nil)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
c.Assert(string(p.Value), checker.Equals, value)
|
||||||
|
}
|
||||||
|
}
|
|
@ -11,7 +11,7 @@ import (
|
||||||
"github.com/containous/traefik/integration/try"
|
"github.com/containous/traefik/integration/try"
|
||||||
"github.com/docker/libkv"
|
"github.com/docker/libkv"
|
||||||
"github.com/docker/libkv/store"
|
"github.com/docker/libkv/store"
|
||||||
"github.com/docker/libkv/store/etcd"
|
"github.com/docker/libkv/store/etcd/v2"
|
||||||
"github.com/go-check/check"
|
"github.com/go-check/check"
|
||||||
|
|
||||||
checker "github.com/vdemeester/shakers"
|
checker "github.com/vdemeester/shakers"
|
||||||
|
@ -43,7 +43,7 @@ func (s *EtcdSuite) SetUpTest(c *check.C) {
|
||||||
|
|
||||||
// wait for etcd
|
// wait for etcd
|
||||||
err = try.Do(60*time.Second, func() error {
|
err = try.Do(60*time.Second, func() error {
|
||||||
_, err := kv.Exists("test")
|
_, err := kv.Exists("test", nil)
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
|
@ -61,7 +61,13 @@ func (s *EtcdSuite) TearDownSuite(c *check.C) {}
|
||||||
func (s *EtcdSuite) TestSimpleConfiguration(c *check.C) {
|
func (s *EtcdSuite) TestSimpleConfiguration(c *check.C) {
|
||||||
etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress
|
etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress
|
||||||
|
|
||||||
file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct{ EtcdHost string }{etcdHost})
|
file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct {
|
||||||
|
EtcdHost string
|
||||||
|
UseAPIV3 bool
|
||||||
|
}{
|
||||||
|
etcdHost,
|
||||||
|
false,
|
||||||
|
})
|
||||||
defer os.Remove(file)
|
defer os.Remove(file)
|
||||||
|
|
||||||
cmd, display := s.traefikCmd(withConfigFile(file))
|
cmd, display := s.traefikCmd(withConfigFile(file))
|
||||||
|
@ -79,7 +85,13 @@ func (s *EtcdSuite) TestSimpleConfiguration(c *check.C) {
|
||||||
func (s *EtcdSuite) TestNominalConfiguration(c *check.C) {
|
func (s *EtcdSuite) TestNominalConfiguration(c *check.C) {
|
||||||
etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress
|
etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress
|
||||||
|
|
||||||
file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct{ EtcdHost string }{etcdHost})
|
file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct {
|
||||||
|
EtcdHost string
|
||||||
|
UseAPIV3 bool
|
||||||
|
}{
|
||||||
|
etcdHost,
|
||||||
|
false,
|
||||||
|
})
|
||||||
defer os.Remove(file)
|
defer os.Remove(file)
|
||||||
|
|
||||||
cmd, display := s.traefikCmd(withConfigFile(file))
|
cmd, display := s.traefikCmd(withConfigFile(file))
|
||||||
|
@ -138,7 +150,7 @@ func (s *EtcdSuite) TestNominalConfiguration(c *check.C) {
|
||||||
|
|
||||||
// wait for etcd
|
// wait for etcd
|
||||||
err = try.Do(60*time.Second, func() error {
|
err = try.Do(60*time.Second, func() error {
|
||||||
_, err := s.kv.Exists("/traefik/frontends/frontend2/routes/test_2/rule")
|
_, err := s.kv.Exists("/traefik/frontends/frontend2/routes/test_2/rule", nil)
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
|
@ -196,7 +208,7 @@ func (s *EtcdSuite) TestGlobalConfiguration(c *check.C) {
|
||||||
|
|
||||||
// wait for etcd
|
// wait for etcd
|
||||||
err = try.Do(60*time.Second, func() error {
|
err = try.Do(60*time.Second, func() error {
|
||||||
_, err := s.kv.Exists("/traefik/entrypoints/http/address")
|
_, err := s.kv.Exists("/traefik/entrypoints/http/address", nil)
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
|
@ -261,7 +273,7 @@ func (s *EtcdSuite) TestGlobalConfiguration(c *check.C) {
|
||||||
|
|
||||||
// wait for etcd
|
// wait for etcd
|
||||||
err = try.Do(60*time.Second, func() error {
|
err = try.Do(60*time.Second, func() error {
|
||||||
_, err := s.kv.Exists("/traefik/frontends/frontend2/routes/test_2/rule")
|
_, err := s.kv.Exists("/traefik/frontends/frontend2/routes/test_2/rule", nil)
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
|
@ -414,7 +426,7 @@ func (s *EtcdSuite) TestCommandStoreConfig(c *check.C) {
|
||||||
for key, value := range checkmap {
|
for key, value := range checkmap {
|
||||||
var p *store.KVPair
|
var p *store.KVPair
|
||||||
err = try.Do(60*time.Second, func() error {
|
err = try.Do(60*time.Second, func() error {
|
||||||
p, err = s.kv.Get(key)
|
p, err = s.kv.Get(key, nil)
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
|
@ -11,6 +11,7 @@ logLevel = "DEBUG"
|
||||||
endpoint = "{{.EtcdHost}}:2379"
|
endpoint = "{{.EtcdHost}}:2379"
|
||||||
prefix = "/traefik"
|
prefix = "/traefik"
|
||||||
watch = true
|
watch = true
|
||||||
|
useAPIV3 = {{.UseAPIV3}}
|
||||||
|
|
||||||
[web]
|
[web]
|
||||||
address = ":8081"
|
address = ":8081"
|
||||||
|
|
|
@ -43,8 +43,8 @@ func init() {
|
||||||
check.Suite(&ConsulSuite{})
|
check.Suite(&ConsulSuite{})
|
||||||
check.Suite(&DockerSuite{})
|
check.Suite(&DockerSuite{})
|
||||||
check.Suite(&DynamoDBSuite{})
|
check.Suite(&DynamoDBSuite{})
|
||||||
check.Suite(&ErrorPagesSuite{})
|
|
||||||
check.Suite(&EtcdSuite{})
|
check.Suite(&EtcdSuite{})
|
||||||
|
check.Suite(&ErrorPagesSuite{})
|
||||||
check.Suite(&EurekaSuite{})
|
check.Suite(&EurekaSuite{})
|
||||||
check.Suite(&FileSuite{})
|
check.Suite(&FileSuite{})
|
||||||
check.Suite(&GRPCSuite{})
|
check.Suite(&GRPCSuite{})
|
||||||
|
@ -61,6 +61,7 @@ func init() {
|
||||||
if *host {
|
if *host {
|
||||||
// tests launched from the host
|
// tests launched from the host
|
||||||
check.Suite(&ProxyProtocolSuite{})
|
check.Suite(&ProxyProtocolSuite{})
|
||||||
|
check.Suite(&Etcd3Suite{})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
62
integration/resources/compose/etcd3.yml
Normal file
62
integration/resources/compose/etcd3.yml
Normal file
|
@ -0,0 +1,62 @@
|
||||||
|
version: '2'
|
||||||
|
|
||||||
|
services:
|
||||||
|
|
||||||
|
etcd:
|
||||||
|
image: quay.io/coreos/etcd:v3.2.9
|
||||||
|
command: /usr/local/bin/etcd --data-dir=/etcd-data --name node1 --initial-advertise-peer-urls http://172.18.0.2:2380 --listen-peer-urls http://172.18.0.2:2380 --advertise-client-urls http://172.18.0.2:2379,http://172.18.0.2:4001 --listen-client-urls http://172.18.0.2:2379,http://172.18.0.2:4001 --initial-cluster node1=http://172.18.0.2:2380 --debug
|
||||||
|
expose:
|
||||||
|
- 2380
|
||||||
|
- 2379
|
||||||
|
- 4001
|
||||||
|
- 7001
|
||||||
|
# networks:
|
||||||
|
# etcd_net:
|
||||||
|
# ipv4_address: 172.10.1.2
|
||||||
|
|
||||||
|
whoami1:
|
||||||
|
image: emilevauge/whoami
|
||||||
|
# depends_on option activate because libcompose (used by libkermit) does not support fix IP yet...
|
||||||
|
# Remove it ASAP
|
||||||
|
depends_on:
|
||||||
|
- etcd
|
||||||
|
# networks:
|
||||||
|
# etcd_net:
|
||||||
|
# ipv4_address: 172.10.1.3
|
||||||
|
|
||||||
|
whoami2:
|
||||||
|
image: emilevauge/whoami
|
||||||
|
# depends_on option activate because libcompose (used by libkermit) does not support fix IP yet...
|
||||||
|
# Remove it ASAP
|
||||||
|
depends_on:
|
||||||
|
- whoami1
|
||||||
|
# networks:
|
||||||
|
# etcd_net:
|
||||||
|
# ipv4_address: 172.10.1.4
|
||||||
|
|
||||||
|
whoami3:
|
||||||
|
image: emilevauge/whoami
|
||||||
|
# depends_on option activate because libcompose (used by libkermit) does not support fix IP yet...
|
||||||
|
# Remove it ASAP
|
||||||
|
depends_on:
|
||||||
|
- whoami2
|
||||||
|
# networks:
|
||||||
|
# etcd_net:
|
||||||
|
# ipv4_address: 172.10.1.5
|
||||||
|
|
||||||
|
whoami4:
|
||||||
|
image: emilevauge/whoami
|
||||||
|
# depends_on option activate because libcompose (used by libkermit) does not support fix IP yet...
|
||||||
|
# Remove it ASAP
|
||||||
|
depends_on:
|
||||||
|
- whoami3
|
||||||
|
# networks:
|
||||||
|
# etcd_net:
|
||||||
|
# ipv4_address: 172.10.1.6
|
||||||
|
|
||||||
|
#networks:
|
||||||
|
# etcd_net:
|
||||||
|
# driver: bridge
|
||||||
|
# ipam:
|
||||||
|
# config:
|
||||||
|
# - subnet: 172.10.1.0/28
|
|
@ -89,7 +89,7 @@ type DoCondition func() error
|
||||||
// Verify if a Key exists in the store
|
// Verify if a Key exists in the store
|
||||||
func KVExists(kv store.Store, key string) DoCondition {
|
func KVExists(kv store.Store, key string) DoCondition {
|
||||||
return func() error {
|
return func() error {
|
||||||
_, err := kv.Exists(key)
|
_, err := kv.Exists(key, nil)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,12 +3,14 @@ package etcd
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/containous/traefik/log"
|
||||||
"github.com/containous/traefik/provider"
|
"github.com/containous/traefik/provider"
|
||||||
"github.com/containous/traefik/provider/kv"
|
"github.com/containous/traefik/provider/kv"
|
||||||
"github.com/containous/traefik/safe"
|
"github.com/containous/traefik/safe"
|
||||||
"github.com/containous/traefik/types"
|
"github.com/containous/traefik/types"
|
||||||
"github.com/docker/libkv/store"
|
"github.com/docker/libkv/store"
|
||||||
"github.com/docker/libkv/store/etcd"
|
"github.com/docker/libkv/store/etcd/v2"
|
||||||
|
"github.com/docker/libkv/store/etcd/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ provider.Provider = (*Provider)(nil)
|
var _ provider.Provider = (*Provider)(nil)
|
||||||
|
@ -16,6 +18,7 @@ var _ provider.Provider = (*Provider)(nil)
|
||||||
// Provider holds configurations of the provider.
|
// Provider holds configurations of the provider.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
kv.Provider `mapstructure:",squash" export:"true"`
|
kv.Provider `mapstructure:",squash" export:"true"`
|
||||||
|
UseAPIV3 bool `description:"Use ETCD API V3" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Provide allows the etcd provider to Provide configurations to traefik
|
// Provide allows the etcd provider to Provide configurations to traefik
|
||||||
|
@ -31,7 +34,14 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
|
||||||
|
|
||||||
// CreateStore creates the KV store
|
// CreateStore creates the KV store
|
||||||
func (p *Provider) CreateStore() (store.Store, error) {
|
func (p *Provider) CreateStore() (store.Store, error) {
|
||||||
p.SetStoreType(store.ETCD)
|
if p.UseAPIV3 {
|
||||||
etcd.Register()
|
etcdv3.Register()
|
||||||
|
p.SetStoreType(store.ETCDV3)
|
||||||
|
} else {
|
||||||
|
// TODO: Deprecated
|
||||||
|
log.Warn("The ETCD API V2 is deprecated. Please use API V3 instead")
|
||||||
|
etcd.Register()
|
||||||
|
p.SetStoreType(store.ETCD)
|
||||||
|
}
|
||||||
return p.Provider.CreateStore()
|
return p.Provider.CreateStore()
|
||||||
}
|
}
|
||||||
|
|
|
@ -65,7 +65,7 @@ func (p *Provider) SetKVClient(kvClient store.Store) {
|
||||||
|
|
||||||
func (p *Provider) watchKv(configurationChan chan<- types.ConfigMessage, prefix string, stop chan bool) error {
|
func (p *Provider) watchKv(configurationChan chan<- types.ConfigMessage, prefix string, stop chan bool) error {
|
||||||
operation := func() error {
|
operation := func() error {
|
||||||
events, err := p.kvclient.WatchTree(p.Prefix, make(chan struct{}))
|
events, err := p.kvclient.WatchTree(p.Prefix, make(chan struct{}), nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Failed to KV WatchTree: %v", err)
|
return fmt.Errorf("Failed to KV WatchTree: %v", err)
|
||||||
}
|
}
|
||||||
|
@ -102,7 +102,7 @@ func (p *Provider) watchKv(configurationChan chan<- types.ConfigMessage, prefix
|
||||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints types.Constraints) error {
|
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints types.Constraints) error {
|
||||||
p.Constraints = append(p.Constraints, constraints...)
|
p.Constraints = append(p.Constraints, constraints...)
|
||||||
operation := func() error {
|
operation := func() error {
|
||||||
if _, err := p.kvclient.Exists(p.Prefix + "/qmslkjdfmqlskdjfmqlksjazçueznbvbwzlkajzebvkwjdcqmlsfj"); err != nil {
|
if _, err := p.kvclient.Exists(p.Prefix+"/qmslkjdfmqlskdjfmqlksjazçueznbvbwzlkajzebvkwjdcqmlsfj", nil); err != nil {
|
||||||
return fmt.Errorf("Failed to test KV store connection: %v", err)
|
return fmt.Errorf("Failed to test KV store connection: %v", err)
|
||||||
}
|
}
|
||||||
if p.Watch {
|
if p.Watch {
|
||||||
|
@ -165,7 +165,7 @@ func (p *Provider) loadConfig() *types.Configuration {
|
||||||
|
|
||||||
func (p *Provider) list(keys ...string) []string {
|
func (p *Provider) list(keys ...string) []string {
|
||||||
joinedKeys := strings.Join(keys, "")
|
joinedKeys := strings.Join(keys, "")
|
||||||
keysPairs, err := p.kvclient.List(joinedKeys)
|
keysPairs, err := p.kvclient.List(joinedKeys, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debugf("Cannot get keys %s %s ", joinedKeys, err)
|
log.Debugf("Cannot get keys %s %s ", joinedKeys, err)
|
||||||
return nil
|
return nil
|
||||||
|
@ -182,7 +182,7 @@ func (p *Provider) listServers(backend string) []string {
|
||||||
serverNames := p.list(backend, "/servers/")
|
serverNames := p.list(backend, "/servers/")
|
||||||
return fun.Filter(func(serverName string) bool {
|
return fun.Filter(func(serverName string) bool {
|
||||||
key := fmt.Sprint(serverName, "/url")
|
key := fmt.Sprint(serverName, "/url")
|
||||||
if _, err := p.kvclient.Get(key); err != nil {
|
if _, err := p.kvclient.Get(key, nil); err != nil {
|
||||||
if err != store.ErrKeyNotFound {
|
if err != store.ErrKeyNotFound {
|
||||||
log.Errorf("Failed to retrieve value for key %s: %s", key, err)
|
log.Errorf("Failed to retrieve value for key %s: %s", key, err)
|
||||||
}
|
}
|
||||||
|
@ -194,7 +194,10 @@ func (p *Provider) listServers(backend string) []string {
|
||||||
|
|
||||||
func (p *Provider) get(defaultValue string, keys ...string) string {
|
func (p *Provider) get(defaultValue string, keys ...string) string {
|
||||||
joinedKeys := strings.Join(keys, "")
|
joinedKeys := strings.Join(keys, "")
|
||||||
keyPair, err := p.kvclient.Get(strings.TrimPrefix(joinedKeys, "/"))
|
if p.storeType == store.ETCD {
|
||||||
|
joinedKeys = strings.TrimPrefix(joinedKeys, "/")
|
||||||
|
}
|
||||||
|
keyPair, err := p.kvclient.Get(joinedKeys, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debugf("Cannot get key %s %s, setting default %s", joinedKeys, err, defaultValue)
|
log.Debugf("Cannot get key %s %s, setting default %s", joinedKeys, err, defaultValue)
|
||||||
return defaultValue
|
return defaultValue
|
||||||
|
@ -207,7 +210,7 @@ func (p *Provider) get(defaultValue string, keys ...string) string {
|
||||||
|
|
||||||
func (p *Provider) splitGet(keys ...string) []string {
|
func (p *Provider) splitGet(keys ...string) []string {
|
||||||
joinedKeys := strings.Join(keys, "")
|
joinedKeys := strings.Join(keys, "")
|
||||||
keyPair, err := p.kvclient.Get(joinedKeys)
|
keyPair, err := p.kvclient.Get(joinedKeys, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debugf("Cannot get key %s %s, setting default empty", joinedKeys, err)
|
log.Debugf("Cannot get key %s %s, setting default empty", joinedKeys, err)
|
||||||
return []string{}
|
return []string{}
|
||||||
|
@ -225,7 +228,7 @@ func (p *Provider) last(key string) string {
|
||||||
|
|
||||||
func (p *Provider) checkConstraints(keys ...string) bool {
|
func (p *Provider) checkConstraints(keys ...string) bool {
|
||||||
joinedKeys := strings.Join(keys, "")
|
joinedKeys := strings.Join(keys, "")
|
||||||
keyPair, err := p.kvclient.Get(joinedKeys)
|
keyPair, err := p.kvclient.Get(joinedKeys, nil)
|
||||||
|
|
||||||
value := ""
|
value := ""
|
||||||
if err == nil && keyPair != nil && keyPair.Value != nil {
|
if err == nil && keyPair != nil && keyPair.Value != nil {
|
||||||
|
|
|
@ -33,7 +33,7 @@ func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error {
|
||||||
return errors.New("Put not supported")
|
return errors.New("Put not supported")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Mock) Get(key string) (*store.KVPair, error) {
|
func (s *Mock) Get(key string, options *store.ReadOptions) (*store.KVPair, error) {
|
||||||
if err := s.Error.Get; err != nil {
|
if err := s.Error.Get; err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -50,7 +50,7 @@ func (s *Mock) Delete(key string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Exists mock
|
// Exists mock
|
||||||
func (s *Mock) Exists(key string) (bool, error) {
|
func (s *Mock) Exists(key string, options *store.ReadOptions) (bool, error) {
|
||||||
if err := s.Error.Get; err != nil {
|
if err := s.Error.Get; err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
@ -63,12 +63,12 @@ func (s *Mock) Exists(key string) (bool, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Watch mock
|
// Watch mock
|
||||||
func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
|
func (s *Mock) Watch(key string, stopCh <-chan struct{}, options *store.ReadOptions) (<-chan *store.KVPair, error) {
|
||||||
return nil, errors.New("Watch not supported")
|
return nil, errors.New("Watch not supported")
|
||||||
}
|
}
|
||||||
|
|
||||||
// WatchTree mock
|
// WatchTree mock
|
||||||
func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
|
func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}, options *store.ReadOptions) (<-chan []*store.KVPair, error) {
|
||||||
return s.WatchTreeMethod(), nil
|
return s.WatchTreeMethod(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -78,7 +78,7 @@ func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, er
|
||||||
}
|
}
|
||||||
|
|
||||||
// List mock
|
// List mock
|
||||||
func (s *Mock) List(prefix string) ([]*store.KVPair, error) {
|
func (s *Mock) List(prefix string, options *store.ReadOptions) ([]*store.KVPair, error) {
|
||||||
if err := s.Error.List; err != nil {
|
if err := s.Error.List; err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -383,7 +383,7 @@ func TestKVHasStickinessLabel(t *testing.T) {
|
||||||
desc: "with cookie name without stickiness=true",
|
desc: "with cookie name without stickiness=true",
|
||||||
KVPairs: []*store.KVPair{
|
KVPairs: []*store.KVPair{
|
||||||
{
|
{
|
||||||
Key: "loadbalancer/stickiness/cookiename",
|
Key: "/loadbalancer/stickiness/cookiename",
|
||||||
Value: []byte("foo"),
|
Value: []byte("foo"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -393,7 +393,7 @@ func TestKVHasStickinessLabel(t *testing.T) {
|
||||||
desc: "stickiness=true",
|
desc: "stickiness=true",
|
||||||
KVPairs: []*store.KVPair{
|
KVPairs: []*store.KVPair{
|
||||||
{
|
{
|
||||||
Key: "loadbalancer/stickiness",
|
Key: "/loadbalancer/stickiness",
|
||||||
Value: []byte("true"),
|
Value: []byte("true"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
43
vendor/github.com/containous/staert/kv.go
generated
vendored
43
vendor/github.com/containous/staert/kv.go
generated
vendored
|
@ -5,19 +5,20 @@ import (
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/containous/flaeg"
|
|
||||||
"github.com/docker/libkv"
|
|
||||||
"github.com/docker/libkv/store"
|
|
||||||
"github.com/mitchellh/mapstructure"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/containous/flaeg"
|
||||||
|
"github.com/docker/libkv"
|
||||||
|
"github.com/docker/libkv/store"
|
||||||
|
"github.com/mitchellh/mapstructure"
|
||||||
)
|
)
|
||||||
|
|
||||||
// KvSource implements Source
|
// KvSource implements Source
|
||||||
// It handles all mapstructure features(Squashed Embeded Sub-Structures, Maps, Pointers)
|
// It handles all mapstructure features(Squashed Embedded Sub-Structures, Maps, Pointers)
|
||||||
// It supports Slices (and maybe Arraies). They must be sorted in the KvStore like this :
|
// It supports Slices (and maybe Arrays). They must be sorted in the KvStore like this :
|
||||||
// Key : ".../[sliceIndex]" -> Value
|
// Key : ".../[sliceIndex]" -> Value
|
||||||
type KvSource struct {
|
type KvSource struct {
|
||||||
store.Store
|
store.Store
|
||||||
|
@ -26,8 +27,8 @@ type KvSource struct {
|
||||||
|
|
||||||
// NewKvSource creates a new KvSource
|
// NewKvSource creates a new KvSource
|
||||||
func NewKvSource(backend store.Backend, addrs []string, options *store.Config, prefix string) (*KvSource, error) {
|
func NewKvSource(backend store.Backend, addrs []string, options *store.Config, prefix string) (*KvSource, error) {
|
||||||
store, err := libkv.NewStore(backend, addrs, options)
|
kvStore, err := libkv.NewStore(backend, addrs, options)
|
||||||
return &KvSource{Store: store, Prefix: prefix}, err
|
return &KvSource{Store: kvStore, Prefix: prefix}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse uses libkv and mapstructure to fill the structure
|
// Parse uses libkv and mapstructure to fill the structure
|
||||||
|
@ -46,11 +47,11 @@ func (kv *KvSource) LoadConfig(config interface{}) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// fmt.Printf("pairs : %#v\n", pairs)
|
// fmt.Printf("pairs : %#v\n", pairs)
|
||||||
mapstruct, err := generateMapstructure(convertPairs(pairs), kv.Prefix)
|
mapStruct, err := generateMapstructure(convertPairs(pairs), kv.Prefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// fmt.Printf("mapstruct : %#v\n", mapstruct)
|
// fmt.Printf("mapStruct : %#v\n", mapStruct)
|
||||||
configDecoder := &mapstructure.DecoderConfig{
|
configDecoder := &mapstructure.DecoderConfig{
|
||||||
Metadata: nil,
|
Metadata: nil,
|
||||||
Result: config,
|
Result: config,
|
||||||
|
@ -61,7 +62,7 @@ func (kv *KvSource) LoadConfig(config interface{}) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := decoder.Decode(mapstruct); err != nil {
|
if err := decoder.Decode(mapStruct); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -82,8 +83,8 @@ func generateMapstructure(pairs []*store.KVPair, prefix string) (map[string]inte
|
||||||
}
|
}
|
||||||
|
|
||||||
func processKV(key string, v []byte, raw map[string]interface{}) (map[string]interface{}, error) {
|
func processKV(key string, v []byte, raw map[string]interface{}) (map[string]interface{}, error) {
|
||||||
// Determine which map we're writing the value to. We split by '/'
|
// Determine which map we're writing the value to.
|
||||||
// to determine any sub-maps that need to be created.
|
// We split by '/' to determine any sub-maps that need to be created.
|
||||||
m := raw
|
m := raw
|
||||||
children := strings.Split(key, "/")
|
children := strings.Split(key, "/")
|
||||||
if len(children) > 0 {
|
if len(children) > 0 {
|
||||||
|
@ -113,7 +114,7 @@ func decodeHook(fromType reflect.Type, toType reflect.Type, data interface{}) (i
|
||||||
object := reflect.New(toType.Elem()).Interface()
|
object := reflect.New(toType.Elem()).Interface()
|
||||||
err := object.(encoding.TextUnmarshaler).UnmarshalText([]byte(data.(string)))
|
err := object.(encoding.TextUnmarshaler).UnmarshalText([]byte(data.(string)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("Error unmarshaling %v: %v", data, err)
|
return nil, fmt.Errorf("error unmarshaling %v: %v", data, err)
|
||||||
}
|
}
|
||||||
return object, nil
|
return object, nil
|
||||||
}
|
}
|
||||||
|
@ -170,7 +171,7 @@ func (kv *KvSource) StoreConfig(config interface{}) error {
|
||||||
if err := collateKvRecursive(reflect.ValueOf(config), kvMap, kv.Prefix); err != nil {
|
if err := collateKvRecursive(reflect.ValueOf(config), kvMap, kv.Prefix); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
keys := []string{}
|
var keys []string
|
||||||
for key := range kvMap {
|
for key := range kvMap {
|
||||||
keys = append(keys, key)
|
keys = append(keys, key)
|
||||||
}
|
}
|
||||||
|
@ -198,7 +199,7 @@ func collateKvRecursive(objValue reflect.Value, kv map[string]string, key string
|
||||||
if marshaler, ok := objValue.Interface().(encoding.TextMarshaler); ok {
|
if marshaler, ok := objValue.Interface().(encoding.TextMarshaler); ok {
|
||||||
test, err := marshaler.MarshalText()
|
test, err := marshaler.MarshalText()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error marshaling key %s: %v", name, err)
|
return fmt.Errorf("error marshaling key %s: %v", name, err)
|
||||||
}
|
}
|
||||||
kv[name] = string(test)
|
kv[name] = string(test)
|
||||||
return nil
|
return nil
|
||||||
|
@ -252,7 +253,7 @@ func collateKvRecursive(objValue reflect.Value, kv map[string]string, key string
|
||||||
case reflect.Map:
|
case reflect.Map:
|
||||||
for _, k := range objValue.MapKeys() {
|
for _, k := range objValue.MapKeys() {
|
||||||
if k.Kind() == reflect.Struct {
|
if k.Kind() == reflect.Struct {
|
||||||
return errors.New("Struct as key not supported")
|
return errors.New("struct as key not supported")
|
||||||
}
|
}
|
||||||
name = key + "/" + fmt.Sprint(k)
|
name = key + "/" + fmt.Sprint(k)
|
||||||
if err := collateKvRecursive(objValue.MapIndex(k), kv, name); err != nil {
|
if err := collateKvRecursive(objValue.MapIndex(k), kv, name); err != nil {
|
||||||
|
@ -280,14 +281,14 @@ func collateKvRecursive(objValue reflect.Value, kv map[string]string, key string
|
||||||
kv[name] = fmt.Sprint(objValue)
|
kv[name] = fmt.Sprint(objValue)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("Kind %s not supported", kind.String())
|
return fmt.Errorf("kind %s not supported", kind.String())
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListRecursive lists all key value childrens under key
|
// ListRecursive lists all key value children under key
|
||||||
func (kv *KvSource) ListRecursive(key string, pairs map[string][]byte) error {
|
func (kv *KvSource) ListRecursive(key string, pairs map[string][]byte) error {
|
||||||
pairsN1, err := kv.List(key)
|
pairsN1, err := kv.List(key, nil)
|
||||||
if err == store.ErrKeyNotFound {
|
if err == store.ErrKeyNotFound {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -295,7 +296,7 @@ func (kv *KvSource) ListRecursive(key string, pairs map[string][]byte) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if len(pairsN1) == 0 {
|
if len(pairsN1) == 0 {
|
||||||
pairLeaf, err := kv.Get(key)
|
pairLeaf, err := kv.Get(key, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
25
vendor/github.com/containous/staert/staert.go
generated
vendored
25
vendor/github.com/containous/staert/staert.go
generated
vendored
|
@ -2,12 +2,13 @@ package staert
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
"github.com/containous/flaeg"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/BurntSushi/toml"
|
||||||
|
"github.com/containous/flaeg"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Source interface must be satisfy to Add any kink of Source to Staert as like as TomlFile or Flaeg
|
// Source interface must be satisfy to Add any kink of Source to Staert as like as TomlFile or Flaeg
|
||||||
|
@ -21,7 +22,7 @@ type Staert struct {
|
||||||
sources []Source
|
sources []Source
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewStaert creats and return a pointer on Staert. Need defaultConfig and defaultPointersConfig given by references
|
// NewStaert creates and return a pointer on Staert. Need defaultConfig and defaultPointersConfig given by references
|
||||||
func NewStaert(rootCommand *flaeg.Command) *Staert {
|
func NewStaert(rootCommand *flaeg.Command) *Staert {
|
||||||
s := Staert{
|
s := Staert{
|
||||||
command: rootCommand,
|
command: rootCommand,
|
||||||
|
@ -62,7 +63,7 @@ func (s *Staert) LoadConfig() (interface{}, error) {
|
||||||
fCmdConfigType := reflect.TypeOf(fCmd.Config)
|
fCmdConfigType := reflect.TypeOf(fCmd.Config)
|
||||||
sCmdConfigType := reflect.TypeOf(s.command.Config)
|
sCmdConfigType := reflect.TypeOf(s.command.Config)
|
||||||
if fCmdConfigType != sCmdConfigType {
|
if fCmdConfigType != sCmdConfigType {
|
||||||
return nil, fmt.Errorf("Command %s : Config type doesn't match with root command config type. Expected %s got %s", fCmd.Name, sCmdConfigType.Name(), fCmdConfigType.Name())
|
return nil, fmt.Errorf("command %s : Config type doesn't match with root command config type. Expected %s got %s", fCmd.Name, sCmdConfigType.Name(), fCmdConfigType.Name())
|
||||||
}
|
}
|
||||||
s.command = fCmd
|
s.command = fCmd
|
||||||
} else {
|
} else {
|
||||||
|
@ -90,7 +91,7 @@ type TomlSource struct {
|
||||||
fullpath string
|
fullpath string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTomlSource creats and return a pointer on TomlSource.
|
// NewTomlSource creates and return a pointer on TomlSource.
|
||||||
// Parameter filename is the file name (without extension type, ".toml" will be added)
|
// Parameter filename is the file name (without extension type, ".toml" will be added)
|
||||||
// dirNfullpath may contain directories or fullpath to the file.
|
// dirNfullpath may contain directories or fullpath to the file.
|
||||||
func NewTomlSource(filename string, dirNfullpath []string) *TomlSource {
|
func NewTomlSource(filename string, dirNfullpath []string) *TomlSource {
|
||||||
|
@ -118,13 +119,13 @@ func preprocessDir(dirIn string) (string, error) {
|
||||||
func findFile(filename string, dirNfile []string) string {
|
func findFile(filename string, dirNfile []string) string {
|
||||||
for _, df := range dirNfile {
|
for _, df := range dirNfile {
|
||||||
if df != "" {
|
if df != "" {
|
||||||
fullpath, _ := preprocessDir(df)
|
fullPath, _ := preprocessDir(df)
|
||||||
if fileinfo, err := os.Stat(fullpath); err == nil && !fileinfo.IsDir() {
|
if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() {
|
||||||
return fullpath
|
return fullPath
|
||||||
}
|
}
|
||||||
fullpath = fullpath + "/" + filename + ".toml"
|
fullPath = fullPath + "/" + filename + ".toml"
|
||||||
if fileinfo, err := os.Stat(fullpath); err == nil && !fileinfo.IsDir() {
|
if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() {
|
||||||
return fullpath
|
return fullPath
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -167,7 +168,7 @@ func (ts *TomlSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func generateArgs(metadata toml.MetaData, flags []string) ([]string, bool, error) {
|
func generateArgs(metadata toml.MetaData, flags []string) ([]string, bool, error) {
|
||||||
flaegArgs := []string{}
|
var flaegArgs []string
|
||||||
keys := metadata.Keys()
|
keys := metadata.Keys()
|
||||||
hasUnderField := false
|
hasUnderField := false
|
||||||
for i, key := range keys {
|
for i, key := range keys {
|
||||||
|
|
20
vendor/github.com/coreos/bbolt/LICENSE
generated
vendored
Normal file
20
vendor/github.com/coreos/bbolt/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2013 Ben Johnson
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||||
|
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||||
|
subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||||
|
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||||
|
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||||
|
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||||
|
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
10
vendor/github.com/coreos/bbolt/bolt_386.go
generated
vendored
Normal file
10
vendor/github.com/coreos/bbolt/bolt_386.go
generated
vendored
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||||
|
const maxMapSize = 0x7FFFFFFF // 2GB
|
||||||
|
|
||||||
|
// maxAllocSize is the size used when creating array pointers.
|
||||||
|
const maxAllocSize = 0xFFFFFFF
|
||||||
|
|
||||||
|
// Are unaligned load/stores broken on this arch?
|
||||||
|
var brokenUnaligned = false
|
10
vendor/github.com/coreos/bbolt/bolt_amd64.go
generated
vendored
Normal file
10
vendor/github.com/coreos/bbolt/bolt_amd64.go
generated
vendored
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||||
|
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||||
|
|
||||||
|
// maxAllocSize is the size used when creating array pointers.
|
||||||
|
const maxAllocSize = 0x7FFFFFFF
|
||||||
|
|
||||||
|
// Are unaligned load/stores broken on this arch?
|
||||||
|
var brokenUnaligned = false
|
28
vendor/github.com/coreos/bbolt/bolt_arm.go
generated
vendored
Normal file
28
vendor/github.com/coreos/bbolt/bolt_arm.go
generated
vendored
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import "unsafe"
|
||||||
|
|
||||||
|
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||||
|
const maxMapSize = 0x7FFFFFFF // 2GB
|
||||||
|
|
||||||
|
// maxAllocSize is the size used when creating array pointers.
|
||||||
|
const maxAllocSize = 0xFFFFFFF
|
||||||
|
|
||||||
|
// Are unaligned load/stores broken on this arch?
|
||||||
|
var brokenUnaligned bool
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Simple check to see whether this arch handles unaligned load/stores
|
||||||
|
// correctly.
|
||||||
|
|
||||||
|
// ARM9 and older devices require load/stores to be from/to aligned
|
||||||
|
// addresses. If not, the lower 2 bits are cleared and that address is
|
||||||
|
// read in a jumbled up order.
|
||||||
|
|
||||||
|
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
|
||||||
|
|
||||||
|
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
|
||||||
|
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
|
||||||
|
|
||||||
|
brokenUnaligned = val != 0x11222211
|
||||||
|
}
|
12
vendor/github.com/coreos/bbolt/bolt_arm64.go
generated
vendored
Normal file
12
vendor/github.com/coreos/bbolt/bolt_arm64.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
// +build arm64
|
||||||
|
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||||
|
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||||
|
|
||||||
|
// maxAllocSize is the size used when creating array pointers.
|
||||||
|
const maxAllocSize = 0x7FFFFFFF
|
||||||
|
|
||||||
|
// Are unaligned load/stores broken on this arch?
|
||||||
|
var brokenUnaligned = false
|
10
vendor/github.com/coreos/bbolt/bolt_linux.go
generated
vendored
Normal file
10
vendor/github.com/coreos/bbolt/bolt_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// fdatasync flushes written data to a file descriptor.
|
||||||
|
func fdatasync(db *DB) error {
|
||||||
|
return syscall.Fdatasync(int(db.file.Fd()))
|
||||||
|
}
|
12
vendor/github.com/coreos/bbolt/bolt_mips64x.go
generated
vendored
Normal file
12
vendor/github.com/coreos/bbolt/bolt_mips64x.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
// +build mips64 mips64le
|
||||||
|
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||||
|
const maxMapSize = 0x8000000000 // 512GB
|
||||||
|
|
||||||
|
// maxAllocSize is the size used when creating array pointers.
|
||||||
|
const maxAllocSize = 0x7FFFFFFF
|
||||||
|
|
||||||
|
// Are unaligned load/stores broken on this arch?
|
||||||
|
var brokenUnaligned = false
|
12
vendor/github.com/coreos/bbolt/bolt_mipsx.go
generated
vendored
Normal file
12
vendor/github.com/coreos/bbolt/bolt_mipsx.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
// +build mips mipsle
|
||||||
|
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||||
|
const maxMapSize = 0x40000000 // 1GB
|
||||||
|
|
||||||
|
// maxAllocSize is the size used when creating array pointers.
|
||||||
|
const maxAllocSize = 0xFFFFFFF
|
||||||
|
|
||||||
|
// Are unaligned load/stores broken on this arch?
|
||||||
|
var brokenUnaligned = false
|
27
vendor/github.com/coreos/bbolt/bolt_openbsd.go
generated
vendored
Normal file
27
vendor/github.com/coreos/bbolt/bolt_openbsd.go
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
msAsync = 1 << iota // perform asynchronous writes
|
||||||
|
msSync // perform synchronous writes
|
||||||
|
msInvalidate // invalidate cached data
|
||||||
|
)
|
||||||
|
|
||||||
|
func msync(db *DB) error {
|
||||||
|
_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
|
||||||
|
if errno != 0 {
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func fdatasync(db *DB) error {
|
||||||
|
if db.data != nil {
|
||||||
|
return msync(db)
|
||||||
|
}
|
||||||
|
return db.file.Sync()
|
||||||
|
}
|
9
vendor/github.com/coreos/bbolt/bolt_ppc.go
generated
vendored
Normal file
9
vendor/github.com/coreos/bbolt/bolt_ppc.go
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
// +build ppc
|
||||||
|
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||||
|
const maxMapSize = 0x7FFFFFFF // 2GB
|
||||||
|
|
||||||
|
// maxAllocSize is the size used when creating array pointers.
|
||||||
|
const maxAllocSize = 0xFFFFFFF
|
12
vendor/github.com/coreos/bbolt/bolt_ppc64.go
generated
vendored
Normal file
12
vendor/github.com/coreos/bbolt/bolt_ppc64.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
// +build ppc64
|
||||||
|
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||||
|
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||||
|
|
||||||
|
// maxAllocSize is the size used when creating array pointers.
|
||||||
|
const maxAllocSize = 0x7FFFFFFF
|
||||||
|
|
||||||
|
// Are unaligned load/stores broken on this arch?
|
||||||
|
var brokenUnaligned = false
|
12
vendor/github.com/coreos/bbolt/bolt_ppc64le.go
generated
vendored
Normal file
12
vendor/github.com/coreos/bbolt/bolt_ppc64le.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
// +build ppc64le
|
||||||
|
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||||
|
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||||
|
|
||||||
|
// maxAllocSize is the size used when creating array pointers.
|
||||||
|
const maxAllocSize = 0x7FFFFFFF
|
||||||
|
|
||||||
|
// Are unaligned load/stores broken on this arch?
|
||||||
|
var brokenUnaligned = false
|
12
vendor/github.com/coreos/bbolt/bolt_s390x.go
generated
vendored
Normal file
12
vendor/github.com/coreos/bbolt/bolt_s390x.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
// +build s390x
|
||||||
|
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||||
|
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||||
|
|
||||||
|
// maxAllocSize is the size used when creating array pointers.
|
||||||
|
const maxAllocSize = 0x7FFFFFFF
|
||||||
|
|
||||||
|
// Are unaligned load/stores broken on this arch?
|
||||||
|
var brokenUnaligned = false
|
92
vendor/github.com/coreos/bbolt/bolt_unix.go
generated
vendored
Normal file
92
vendor/github.com/coreos/bbolt/bolt_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,92 @@
|
||||||
|
// +build !windows,!plan9,!solaris
|
||||||
|
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// flock acquires an advisory lock on a file descriptor.
|
||||||
|
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
|
||||||
|
var t time.Time
|
||||||
|
if timeout != 0 {
|
||||||
|
t = time.Now()
|
||||||
|
}
|
||||||
|
fd := db.file.Fd()
|
||||||
|
flag := syscall.LOCK_NB
|
||||||
|
if exclusive {
|
||||||
|
flag |= syscall.LOCK_EX
|
||||||
|
} else {
|
||||||
|
flag |= syscall.LOCK_SH
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
// Attempt to obtain an exclusive lock.
|
||||||
|
err := syscall.Flock(int(fd), flag)
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
} else if err != syscall.EWOULDBLOCK {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we timed out then return an error.
|
||||||
|
if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
|
||||||
|
return ErrTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for a bit and try again.
|
||||||
|
time.Sleep(flockRetryTimeout)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// funlock releases an advisory lock on a file descriptor.
|
||||||
|
func funlock(db *DB) error {
|
||||||
|
return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
|
||||||
|
}
|
||||||
|
|
||||||
|
// mmap memory maps a DB's data file.
|
||||||
|
func mmap(db *DB, sz int) error {
|
||||||
|
// Map the data file to memory.
|
||||||
|
b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Advise the kernel that the mmap is accessed randomly.
|
||||||
|
if err := madvise(b, syscall.MADV_RANDOM); err != nil {
|
||||||
|
return fmt.Errorf("madvise: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save the original byte slice and convert to a byte array pointer.
|
||||||
|
db.dataref = b
|
||||||
|
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
|
||||||
|
db.datasz = sz
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// munmap unmaps a DB's data file from memory.
|
||||||
|
func munmap(db *DB) error {
|
||||||
|
// Ignore the unmap if we have no mapped data.
|
||||||
|
if db.dataref == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmap using the original byte slice.
|
||||||
|
err := syscall.Munmap(db.dataref)
|
||||||
|
db.dataref = nil
|
||||||
|
db.data = nil
|
||||||
|
db.datasz = 0
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE: This function is copied from stdlib because it is not available on darwin.
|
||||||
|
func madvise(b []byte, advice int) (err error) {
|
||||||
|
_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
|
||||||
|
if e1 != 0 {
|
||||||
|
err = e1
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
89
vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
generated
vendored
Normal file
89
vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
generated
vendored
Normal file
|
@ -0,0 +1,89 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// flock acquires an advisory lock on a file descriptor.
|
||||||
|
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
|
||||||
|
var t time.Time
|
||||||
|
if timeout != 0 {
|
||||||
|
t = time.Now()
|
||||||
|
}
|
||||||
|
fd := db.file.Fd()
|
||||||
|
var lockType int16
|
||||||
|
if exclusive {
|
||||||
|
lockType = syscall.F_WRLCK
|
||||||
|
} else {
|
||||||
|
lockType = syscall.F_RDLCK
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
// Attempt to obtain an exclusive lock.
|
||||||
|
lock := syscall.Flock_t{Type: lockType}
|
||||||
|
err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
} else if err != syscall.EAGAIN {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we timed out then return an error.
|
||||||
|
if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
|
||||||
|
return ErrTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for a bit and try again.
|
||||||
|
time.Sleep(flockRetryTimeout)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// funlock releases an advisory lock on a file descriptor.
|
||||||
|
func funlock(db *DB) error {
|
||||||
|
var lock syscall.Flock_t
|
||||||
|
lock.Start = 0
|
||||||
|
lock.Len = 0
|
||||||
|
lock.Type = syscall.F_UNLCK
|
||||||
|
lock.Whence = 0
|
||||||
|
return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
|
||||||
|
}
|
||||||
|
|
||||||
|
// mmap memory maps a DB's data file.
|
||||||
|
func mmap(db *DB, sz int) error {
|
||||||
|
// Map the data file to memory.
|
||||||
|
b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Advise the kernel that the mmap is accessed randomly.
|
||||||
|
if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
|
||||||
|
return fmt.Errorf("madvise: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save the original byte slice and convert to a byte array pointer.
|
||||||
|
db.dataref = b
|
||||||
|
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
|
||||||
|
db.datasz = sz
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// munmap unmaps a DB's data file from memory.
|
||||||
|
func munmap(db *DB) error {
|
||||||
|
// Ignore the unmap if we have no mapped data.
|
||||||
|
if db.dataref == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmap using the original byte slice.
|
||||||
|
err := unix.Munmap(db.dataref)
|
||||||
|
db.dataref = nil
|
||||||
|
db.data = nil
|
||||||
|
db.datasz = 0
|
||||||
|
return err
|
||||||
|
}
|
145
vendor/github.com/coreos/bbolt/bolt_windows.go
generated
vendored
Normal file
145
vendor/github.com/coreos/bbolt/bolt_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,145 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
|
||||||
|
var (
|
||||||
|
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||||
|
procLockFileEx = modkernel32.NewProc("LockFileEx")
|
||||||
|
procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
lockExt = ".lock"
|
||||||
|
|
||||||
|
// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
|
||||||
|
flagLockExclusive = 2
|
||||||
|
flagLockFailImmediately = 1
|
||||||
|
|
||||||
|
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
|
||||||
|
errLockViolation syscall.Errno = 0x21
|
||||||
|
)
|
||||||
|
|
||||||
|
func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
|
||||||
|
r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
|
||||||
|
if r == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
|
||||||
|
r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
|
||||||
|
if r == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// fdatasync flushes written data to a file descriptor.
|
||||||
|
func fdatasync(db *DB) error {
|
||||||
|
return db.file.Sync()
|
||||||
|
}
|
||||||
|
|
||||||
|
// flock acquires an advisory lock on a file descriptor.
|
||||||
|
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
|
||||||
|
// Create a separate lock file on windows because a process
|
||||||
|
// cannot share an exclusive lock on the same file. This is
|
||||||
|
// needed during Tx.WriteTo().
|
||||||
|
f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
db.lockfile = f
|
||||||
|
|
||||||
|
var t time.Time
|
||||||
|
if timeout != 0 {
|
||||||
|
t = time.Now()
|
||||||
|
}
|
||||||
|
fd := f.Fd()
|
||||||
|
var flag uint32 = flagLockFailImmediately
|
||||||
|
if exclusive {
|
||||||
|
flag |= flagLockExclusive
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
// Attempt to obtain an exclusive lock.
|
||||||
|
err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, &syscall.Overlapped{})
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
} else if err != errLockViolation {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we timed oumercit then return an error.
|
||||||
|
if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
|
||||||
|
return ErrTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for a bit and try again.
|
||||||
|
time.Sleep(flockRetryTimeout)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// funlock releases an advisory lock on a file descriptor.
|
||||||
|
func funlock(db *DB) error {
|
||||||
|
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
|
||||||
|
db.lockfile.Close()
|
||||||
|
os.Remove(db.path + lockExt)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// mmap memory maps a DB's data file.
|
||||||
|
// Based on: https://github.com/edsrzf/mmap-go
|
||||||
|
func mmap(db *DB, sz int) error {
|
||||||
|
if !db.readOnly {
|
||||||
|
// Truncate the database to the size of the mmap.
|
||||||
|
if err := db.file.Truncate(int64(sz)); err != nil {
|
||||||
|
return fmt.Errorf("truncate: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open a file mapping handle.
|
||||||
|
sizelo := uint32(sz >> 32)
|
||||||
|
sizehi := uint32(sz) & 0xffffffff
|
||||||
|
h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
|
||||||
|
if h == 0 {
|
||||||
|
return os.NewSyscallError("CreateFileMapping", errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the memory map.
|
||||||
|
addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
|
||||||
|
if addr == 0 {
|
||||||
|
return os.NewSyscallError("MapViewOfFile", errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close mapping handle.
|
||||||
|
if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
|
||||||
|
return os.NewSyscallError("CloseHandle", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert to a byte array.
|
||||||
|
db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
|
||||||
|
db.datasz = sz
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// munmap unmaps a pointer from a file.
|
||||||
|
// Based on: https://github.com/edsrzf/mmap-go
|
||||||
|
func munmap(db *DB) error {
|
||||||
|
if db.data == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
addr := (uintptr)(unsafe.Pointer(&db.data[0]))
|
||||||
|
if err := syscall.UnmapViewOfFile(addr); err != nil {
|
||||||
|
return os.NewSyscallError("UnmapViewOfFile", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
8
vendor/github.com/coreos/bbolt/boltsync_unix.go
generated
vendored
Normal file
8
vendor/github.com/coreos/bbolt/boltsync_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
// +build !windows,!plan9,!linux,!openbsd
|
||||||
|
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
// fdatasync flushes written data to a file descriptor.
|
||||||
|
func fdatasync(db *DB) error {
|
||||||
|
return db.file.Sync()
|
||||||
|
}
|
775
vendor/github.com/coreos/bbolt/bucket.go
generated
vendored
Normal file
775
vendor/github.com/coreos/bbolt/bucket.go
generated
vendored
Normal file
|
@ -0,0 +1,775 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// MaxKeySize is the maximum length of a key, in bytes.
|
||||||
|
MaxKeySize = 32768
|
||||||
|
|
||||||
|
// MaxValueSize is the maximum length of a value, in bytes.
|
||||||
|
MaxValueSize = (1 << 31) - 2
|
||||||
|
)
|
||||||
|
|
||||||
|
const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
|
||||||
|
|
||||||
|
const (
|
||||||
|
minFillPercent = 0.1
|
||||||
|
maxFillPercent = 1.0
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultFillPercent is the percentage that split pages are filled.
|
||||||
|
// This value can be changed by setting Bucket.FillPercent.
|
||||||
|
const DefaultFillPercent = 0.5
|
||||||
|
|
||||||
|
// Bucket represents a collection of key/value pairs inside the database.
|
||||||
|
type Bucket struct {
|
||||||
|
*bucket
|
||||||
|
tx *Tx // the associated transaction
|
||||||
|
buckets map[string]*Bucket // subbucket cache
|
||||||
|
page *page // inline page reference
|
||||||
|
rootNode *node // materialized node for the root page.
|
||||||
|
nodes map[pgid]*node // node cache
|
||||||
|
|
||||||
|
// Sets the threshold for filling nodes when they split. By default,
|
||||||
|
// the bucket will fill to 50% but it can be useful to increase this
|
||||||
|
// amount if you know that your write workloads are mostly append-only.
|
||||||
|
//
|
||||||
|
// This is non-persisted across transactions so it must be set in every Tx.
|
||||||
|
FillPercent float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// bucket represents the on-file representation of a bucket.
|
||||||
|
// This is stored as the "value" of a bucket key. If the bucket is small enough,
|
||||||
|
// then its root page can be stored inline in the "value", after the bucket
|
||||||
|
// header. In the case of inline buckets, the "root" will be 0.
|
||||||
|
type bucket struct {
|
||||||
|
root pgid // page id of the bucket's root-level page
|
||||||
|
sequence uint64 // monotonically incrementing, used by NextSequence()
|
||||||
|
}
|
||||||
|
|
||||||
|
// newBucket returns a new bucket associated with a transaction.
|
||||||
|
func newBucket(tx *Tx) Bucket {
|
||||||
|
var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
|
||||||
|
if tx.writable {
|
||||||
|
b.buckets = make(map[string]*Bucket)
|
||||||
|
b.nodes = make(map[pgid]*node)
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tx returns the tx of the bucket.
|
||||||
|
func (b *Bucket) Tx() *Tx {
|
||||||
|
return b.tx
|
||||||
|
}
|
||||||
|
|
||||||
|
// Root returns the root of the bucket.
|
||||||
|
func (b *Bucket) Root() pgid {
|
||||||
|
return b.root
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writable returns whether the bucket is writable.
|
||||||
|
func (b *Bucket) Writable() bool {
|
||||||
|
return b.tx.writable
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cursor creates a cursor associated with the bucket.
|
||||||
|
// The cursor is only valid as long as the transaction is open.
|
||||||
|
// Do not use a cursor after the transaction is closed.
|
||||||
|
func (b *Bucket) Cursor() *Cursor {
|
||||||
|
// Update transaction statistics.
|
||||||
|
b.tx.stats.CursorCount++
|
||||||
|
|
||||||
|
// Allocate and return a cursor.
|
||||||
|
return &Cursor{
|
||||||
|
bucket: b,
|
||||||
|
stack: make([]elemRef, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bucket retrieves a nested bucket by name.
|
||||||
|
// Returns nil if the bucket does not exist.
|
||||||
|
// The bucket instance is only valid for the lifetime of the transaction.
|
||||||
|
func (b *Bucket) Bucket(name []byte) *Bucket {
|
||||||
|
if b.buckets != nil {
|
||||||
|
if child := b.buckets[string(name)]; child != nil {
|
||||||
|
return child
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move cursor to key.
|
||||||
|
c := b.Cursor()
|
||||||
|
k, v, flags := c.seek(name)
|
||||||
|
|
||||||
|
// Return nil if the key doesn't exist or it is not a bucket.
|
||||||
|
if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise create a bucket and cache it.
|
||||||
|
var child = b.openBucket(v)
|
||||||
|
if b.buckets != nil {
|
||||||
|
b.buckets[string(name)] = child
|
||||||
|
}
|
||||||
|
|
||||||
|
return child
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper method that re-interprets a sub-bucket value
|
||||||
|
// from a parent into a Bucket
|
||||||
|
func (b *Bucket) openBucket(value []byte) *Bucket {
|
||||||
|
var child = newBucket(b.tx)
|
||||||
|
|
||||||
|
// If unaligned load/stores are broken on this arch and value is
|
||||||
|
// unaligned simply clone to an aligned byte array.
|
||||||
|
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
|
||||||
|
|
||||||
|
if unaligned {
|
||||||
|
value = cloneBytes(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If this is a writable transaction then we need to copy the bucket entry.
|
||||||
|
// Read-only transactions can point directly at the mmap entry.
|
||||||
|
if b.tx.writable && !unaligned {
|
||||||
|
child.bucket = &bucket{}
|
||||||
|
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
|
||||||
|
} else {
|
||||||
|
child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save a reference to the inline page if the bucket is inline.
|
||||||
|
if child.root == 0 {
|
||||||
|
child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
|
||||||
|
}
|
||||||
|
|
||||||
|
return &child
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateBucket creates a new bucket at the given key and returns the new bucket.
|
||||||
|
// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
|
||||||
|
// The bucket instance is only valid for the lifetime of the transaction.
|
||||||
|
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
|
||||||
|
if b.tx.db == nil {
|
||||||
|
return nil, ErrTxClosed
|
||||||
|
} else if !b.tx.writable {
|
||||||
|
return nil, ErrTxNotWritable
|
||||||
|
} else if len(key) == 0 {
|
||||||
|
return nil, ErrBucketNameRequired
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move cursor to correct position.
|
||||||
|
c := b.Cursor()
|
||||||
|
k, _, flags := c.seek(key)
|
||||||
|
|
||||||
|
// Return an error if there is an existing key.
|
||||||
|
if bytes.Equal(key, k) {
|
||||||
|
if (flags & bucketLeafFlag) != 0 {
|
||||||
|
return nil, ErrBucketExists
|
||||||
|
}
|
||||||
|
return nil, ErrIncompatibleValue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create empty, inline bucket.
|
||||||
|
var bucket = Bucket{
|
||||||
|
bucket: &bucket{},
|
||||||
|
rootNode: &node{isLeaf: true},
|
||||||
|
FillPercent: DefaultFillPercent,
|
||||||
|
}
|
||||||
|
var value = bucket.write()
|
||||||
|
|
||||||
|
// Insert into node.
|
||||||
|
key = cloneBytes(key)
|
||||||
|
c.node().put(key, key, value, 0, bucketLeafFlag)
|
||||||
|
|
||||||
|
// Since subbuckets are not allowed on inline buckets, we need to
|
||||||
|
// dereference the inline page, if it exists. This will cause the bucket
|
||||||
|
// to be treated as a regular, non-inline bucket for the rest of the tx.
|
||||||
|
b.page = nil
|
||||||
|
|
||||||
|
return b.Bucket(key), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
|
||||||
|
// Returns an error if the bucket name is blank, or if the bucket name is too long.
|
||||||
|
// The bucket instance is only valid for the lifetime of the transaction.
|
||||||
|
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
|
||||||
|
child, err := b.CreateBucket(key)
|
||||||
|
if err == ErrBucketExists {
|
||||||
|
return b.Bucket(key), nil
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return child, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteBucket deletes a bucket at the given key.
|
||||||
|
// Returns an error if the bucket does not exists, or if the key represents a non-bucket value.
|
||||||
|
func (b *Bucket) DeleteBucket(key []byte) error {
|
||||||
|
if b.tx.db == nil {
|
||||||
|
return ErrTxClosed
|
||||||
|
} else if !b.Writable() {
|
||||||
|
return ErrTxNotWritable
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move cursor to correct position.
|
||||||
|
c := b.Cursor()
|
||||||
|
k, _, flags := c.seek(key)
|
||||||
|
|
||||||
|
// Return an error if bucket doesn't exist or is not a bucket.
|
||||||
|
if !bytes.Equal(key, k) {
|
||||||
|
return ErrBucketNotFound
|
||||||
|
} else if (flags & bucketLeafFlag) == 0 {
|
||||||
|
return ErrIncompatibleValue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursively delete all child buckets.
|
||||||
|
child := b.Bucket(key)
|
||||||
|
err := child.ForEach(func(k, v []byte) error {
|
||||||
|
if v == nil {
|
||||||
|
if err := child.DeleteBucket(k); err != nil {
|
||||||
|
return fmt.Errorf("delete bucket: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove cached copy.
|
||||||
|
delete(b.buckets, string(key))
|
||||||
|
|
||||||
|
// Release all bucket pages to freelist.
|
||||||
|
child.nodes = nil
|
||||||
|
child.rootNode = nil
|
||||||
|
child.free()
|
||||||
|
|
||||||
|
// Delete the node if we have a matching key.
|
||||||
|
c.node().del(key)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get retrieves the value for a key in the bucket.
|
||||||
|
// Returns a nil value if the key does not exist or if the key is a nested bucket.
|
||||||
|
// The returned value is only valid for the life of the transaction.
|
||||||
|
func (b *Bucket) Get(key []byte) []byte {
|
||||||
|
k, v, flags := b.Cursor().seek(key)
|
||||||
|
|
||||||
|
// Return nil if this is a bucket.
|
||||||
|
if (flags & bucketLeafFlag) != 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If our target node isn't the same key as what's passed in then return nil.
|
||||||
|
if !bytes.Equal(key, k) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put sets the value for a key in the bucket.
|
||||||
|
// If the key exist then its previous value will be overwritten.
|
||||||
|
// Supplied value must remain valid for the life of the transaction.
|
||||||
|
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
|
||||||
|
func (b *Bucket) Put(key []byte, value []byte) error {
|
||||||
|
if b.tx.db == nil {
|
||||||
|
return ErrTxClosed
|
||||||
|
} else if !b.Writable() {
|
||||||
|
return ErrTxNotWritable
|
||||||
|
} else if len(key) == 0 {
|
||||||
|
return ErrKeyRequired
|
||||||
|
} else if len(key) > MaxKeySize {
|
||||||
|
return ErrKeyTooLarge
|
||||||
|
} else if int64(len(value)) > MaxValueSize {
|
||||||
|
return ErrValueTooLarge
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move cursor to correct position.
|
||||||
|
c := b.Cursor()
|
||||||
|
k, _, flags := c.seek(key)
|
||||||
|
|
||||||
|
// Return an error if there is an existing key with a bucket value.
|
||||||
|
if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
|
||||||
|
return ErrIncompatibleValue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert into node.
|
||||||
|
key = cloneBytes(key)
|
||||||
|
c.node().put(key, key, value, 0, 0)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes a key from the bucket.
|
||||||
|
// If the key does not exist then nothing is done and a nil error is returned.
|
||||||
|
// Returns an error if the bucket was created from a read-only transaction.
|
||||||
|
func (b *Bucket) Delete(key []byte) error {
|
||||||
|
if b.tx.db == nil {
|
||||||
|
return ErrTxClosed
|
||||||
|
} else if !b.Writable() {
|
||||||
|
return ErrTxNotWritable
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move cursor to correct position.
|
||||||
|
c := b.Cursor()
|
||||||
|
k, _, flags := c.seek(key)
|
||||||
|
|
||||||
|
// Return nil if the key doesn't exist.
|
||||||
|
if !bytes.Equal(key, k) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an error if there is already existing bucket value.
|
||||||
|
if (flags & bucketLeafFlag) != 0 {
|
||||||
|
return ErrIncompatibleValue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete the node if we have a matching key.
|
||||||
|
c.node().del(key)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sequence returns the current integer for the bucket without incrementing it.
|
||||||
|
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
|
||||||
|
|
||||||
|
// SetSequence updates the sequence number for the bucket.
|
||||||
|
func (b *Bucket) SetSequence(v uint64) error {
|
||||||
|
if b.tx.db == nil {
|
||||||
|
return ErrTxClosed
|
||||||
|
} else if !b.Writable() {
|
||||||
|
return ErrTxNotWritable
|
||||||
|
}
|
||||||
|
|
||||||
|
// Materialize the root node if it hasn't been already so that the
|
||||||
|
// bucket will be saved during commit.
|
||||||
|
if b.rootNode == nil {
|
||||||
|
_ = b.node(b.root, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Increment and return the sequence.
|
||||||
|
b.bucket.sequence = v
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextSequence returns an autoincrementing integer for the bucket.
|
||||||
|
func (b *Bucket) NextSequence() (uint64, error) {
|
||||||
|
if b.tx.db == nil {
|
||||||
|
return 0, ErrTxClosed
|
||||||
|
} else if !b.Writable() {
|
||||||
|
return 0, ErrTxNotWritable
|
||||||
|
}
|
||||||
|
|
||||||
|
// Materialize the root node if it hasn't been already so that the
|
||||||
|
// bucket will be saved during commit.
|
||||||
|
if b.rootNode == nil {
|
||||||
|
_ = b.node(b.root, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Increment and return the sequence.
|
||||||
|
b.bucket.sequence++
|
||||||
|
return b.bucket.sequence, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForEach executes a function for each key/value pair in a bucket.
|
||||||
|
// If the provided function returns an error then the iteration is stopped and
|
||||||
|
// the error is returned to the caller. The provided function must not modify
|
||||||
|
// the bucket; this will result in undefined behavior.
|
||||||
|
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
|
||||||
|
if b.tx.db == nil {
|
||||||
|
return ErrTxClosed
|
||||||
|
}
|
||||||
|
c := b.Cursor()
|
||||||
|
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||||
|
if err := fn(k, v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stats returns stats on a bucket.
// The result is aggregated recursively: usage from nested sub-buckets
// (including inlined ones) is folded into the returned totals.
func (b *Bucket) Stats() BucketStats {
	var s, subStats BucketStats
	pageSize := b.tx.db.pageSize
	s.BucketN += 1
	if b.root == 0 {
		// A zero root page id marks an inline bucket (stored inside its
		// parent's leaf value rather than on pages of its own).
		s.InlineBucketN += 1
	}
	b.forEachPage(func(p *page, depth int) {
		if (p.flags & leafPageFlag) != 0 {
			s.KeyN += int(p.count)

			// used totals the used bytes for the page
			used := pageHeaderSize

			if p.count != 0 {
				// If page has any elements, add all element headers.
				used += leafPageElementSize * int(p.count-1)

				// Add all element key, value sizes.
				// The computation takes advantage of the fact that the position
				// of the last element's key/value equals to the total of the sizes
				// of all previous elements' keys and values.
				// It also includes the last element's header.
				lastElement := p.leafPageElement(p.count - 1)
				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
			}

			if b.root == 0 {
				// For inlined bucket just update the inline stats
				s.InlineBucketInuse += used
			} else {
				// For non-inlined bucket update all the leaf stats
				s.LeafPageN++
				s.LeafInuse += used
				s.LeafOverflowN += int(p.overflow)

				// Collect stats from sub-buckets.
				// Do that by iterating over all element headers
				// looking for the ones with the bucketLeafFlag.
				for i := uint16(0); i < p.count; i++ {
					e := p.leafPageElement(i)
					if (e.flags & bucketLeafFlag) != 0 {
						// For any bucket element, open the element value
						// and recursively call Stats on the contained bucket.
						subStats.Add(b.openBucket(e.value()).Stats())
					}
				}
			}
		} else if (p.flags & branchPageFlag) != 0 {
			s.BranchPageN++
			lastElement := p.branchPageElement(p.count - 1)

			// used totals the used bytes for the page
			// Add header and all element headers.
			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))

			// Add size of all keys and values.
			// Again, use the fact that last element's position equals to
			// the total of key, value sizes of all previous elements.
			used += int(lastElement.pos + lastElement.ksize)
			s.BranchInuse += used
			s.BranchOverflowN += int(p.overflow)
		}

		// Keep track of maximum page depth.
		if depth+1 > s.Depth {
			s.Depth = (depth + 1)
		}
	})

	// Alloc stats can be computed from page counts and pageSize.
	s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
	s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize

	// Add the max depth of sub-buckets to get total nested depth.
	s.Depth += subStats.Depth
	// Add the stats for all sub-buckets
	s.Add(subStats)
	return s
}
|
||||||
|
|
||||||
|
// forEachPage iterates over every page in a bucket, including inline pages.
// fn receives each page together with its depth below the bucket root.
func (b *Bucket) forEachPage(fn func(*page, int)) {
	// If we have an inline page then just use that.
	if b.page != nil {
		fn(b.page, 0)
		return
	}

	// Otherwise traverse the page hierarchy.
	b.tx.forEachPage(b.root, 0, fn)
}
|
||||||
|
|
||||||
|
// forEachPageNode iterates over every page (or node) in a bucket.
// This also includes inline pages.
// For each element visited, exactly one of the page/node arguments passed
// to fn is non-nil: the node when a materialized one exists, otherwise the
// underlying page.
func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
	// If we have an inline page or root node then just use that.
	if b.page != nil {
		fn(b.page, nil, 0)
		return
	}
	b._forEachPageNode(b.root, 0, fn)
}
|
||||||
|
|
||||||
|
// _forEachPageNode recursively visits the page/node identified by pgid and
// all of its descendants, invoking fn for each with its depth in the tree.
func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
	var p, n = b.pageNode(pgid)

	// Execute function.
	fn(p, n, depth)

	// Recursively loop over children.
	if p != nil {
		// Page-backed element: only branch pages have child pages to follow.
		if (p.flags & branchPageFlag) != 0 {
			for i := 0; i < int(p.count); i++ {
				elem := p.branchPageElement(uint16(i))
				b._forEachPageNode(elem.pgid, depth+1, fn)
			}
		}
	} else {
		// Materialized node: recurse through its inodes unless it is a leaf.
		if !n.isLeaf {
			for _, inode := range n.inodes {
				b._forEachPageNode(inode.pgid, depth+1, fn)
			}
		}
	}
}
|
||||||
|
|
||||||
|
// spill writes all the nodes for this bucket to dirty pages.
// Child buckets are spilled first: a child small enough to be inlined is
// serialized directly into this bucket's leaf value, otherwise it is
// spilled to its own pages and only its header is stored in the value.
// Returns the first error encountered while spilling any node.
func (b *Bucket) spill() error {
	// Spill all child buckets first.
	for name, child := range b.buckets {
		// If the child bucket is small enough and it has no child buckets then
		// write it inline into the parent bucket's page. Otherwise spill it
		// like a normal bucket and make the parent value a pointer to the page.
		var value []byte
		if child.inlineable() {
			child.free()
			value = child.write()
		} else {
			if err := child.spill(); err != nil {
				return err
			}

			// Update the child bucket header in this bucket.
			// Reinterpret the byte slice as a bucket header so the child's
			// (possibly updated) root pgid is captured in the parent value.
			value = make([]byte, unsafe.Sizeof(bucket{}))
			var bucket = (*bucket)(unsafe.Pointer(&value[0]))
			*bucket = *child.bucket
		}

		// Skip writing the bucket if there are no materialized nodes.
		if child.rootNode == nil {
			continue
		}

		// Update parent node.
		var c = b.Cursor()
		k, _, flags := c.seek([]byte(name))
		if !bytes.Equal([]byte(name), k) {
			panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
		}
		if flags&bucketLeafFlag == 0 {
			panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
		}
		c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
	}

	// Ignore if there's not a materialized root node.
	if b.rootNode == nil {
		return nil
	}

	// Spill nodes.
	if err := b.rootNode.spill(); err != nil {
		return err
	}
	b.rootNode = b.rootNode.root()

	// Update the root node for this bucket.
	if b.rootNode.pgid >= b.tx.meta.pgid {
		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
	}
	b.root = b.rootNode.pgid

	return nil
}
|
||||||
|
|
||||||
|
// inlineable returns true if a bucket is small enough to be written inline
// and if it contains no subbuckets. Otherwise returns false.
// The size estimate mirrors the serialized form: a page header plus one
// leaf element header, key and value per inode.
func (b *Bucket) inlineable() bool {
	var n = b.rootNode

	// Bucket must only contain a single leaf node.
	if n == nil || !n.isLeaf {
		return false
	}

	// Bucket is not inlineable if it contains subbuckets or if it goes beyond
	// our threshold for inline bucket size.
	var size = pageHeaderSize
	for _, inode := range n.inodes {
		size += leafPageElementSize + len(inode.key) + len(inode.value)

		if inode.flags&bucketLeafFlag != 0 {
			return false
		} else if size > b.maxInlineBucketSize() {
			return false
		}
	}

	return true
}
|
||||||
|
|
||||||
|
// maxInlineBucketSize returns the maximum total size of a bucket to make it
// a candidate for inlining. Buckets larger than a quarter of a page are
// always written to pages of their own.
func (b *Bucket) maxInlineBucketSize() int {
	return b.tx.db.pageSize / 4
}
|
||||||
|
|
||||||
|
// write allocates and writes a bucket to a byte slice.
// The layout is the bucket header immediately followed by a serialized
// "fake" page holding the root node; this is the inline-bucket form that
// spill() stores in the parent bucket's leaf value.
func (b *Bucket) write() []byte {
	// Allocate the appropriate size.
	var n = b.rootNode
	var value = make([]byte, bucketHeaderSize+n.size())

	// Write a bucket header.
	var bucket = (*bucket)(unsafe.Pointer(&value[0]))
	*bucket = *b.bucket

	// Convert byte slice to a fake page and write the root node.
	var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
	n.write(p)

	return value
}
|
||||||
|
|
||||||
|
// rebalance attempts to balance all nodes.
|
||||||
|
func (b *Bucket) rebalance() {
|
||||||
|
for _, n := range b.nodes {
|
||||||
|
n.rebalance()
|
||||||
|
}
|
||||||
|
for _, child := range b.buckets {
|
||||||
|
child.rebalance()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// node creates a node from a page and associates it with a given parent.
// Nodes are cached per-pgid on the bucket so repeated lookups return the
// same (mutable) instance; the bucket's write transaction relies on this.
func (b *Bucket) node(pgid pgid, parent *node) *node {
	_assert(b.nodes != nil, "nodes map expected")

	// Retrieve node if it's already been created.
	if n := b.nodes[pgid]; n != nil {
		return n
	}

	// Otherwise create a node and cache it.
	n := &node{bucket: b, parent: parent}
	if parent == nil {
		b.rootNode = n
	} else {
		parent.children = append(parent.children, n)
	}

	// Use the inline page if this is an inline bucket.
	var p = b.page
	if p == nil {
		p = b.tx.page(pgid)
	}

	// Read the page into the node and cache it.
	n.read(p)
	b.nodes[pgid] = n

	// Update statistics.
	b.tx.stats.NodeCount++

	return n
}
|
||||||
|
|
||||||
|
// free recursively frees all pages in the bucket.
// Page-backed elements are released to the freelist under the current
// transaction id; materialized nodes free themselves. Inline buckets
// (root == 0) own no pages, so there is nothing to release.
func (b *Bucket) free() {
	if b.root == 0 {
		return
	}

	var tx = b.tx
	b.forEachPageNode(func(p *page, n *node, _ int) {
		if p != nil {
			tx.db.freelist.free(tx.meta.txid, p)
		} else {
			n.free()
		}
	})
	b.root = 0
}
|
||||||
|
|
||||||
|
// dereference removes all references to the old mmap.
|
||||||
|
func (b *Bucket) dereference() {
|
||||||
|
if b.rootNode != nil {
|
||||||
|
b.rootNode.root().dereference()
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, child := range b.buckets {
|
||||||
|
child.dereference()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// pageNode returns the in-memory node, if it exists.
// Otherwise returns the underlying page.
// Exactly one of the two return values is non-nil.
func (b *Bucket) pageNode(id pgid) (*page, *node) {
	// Inline buckets have a fake page embedded in their value so treat them
	// differently. We'll return the rootNode (if available) or the fake page.
	if b.root == 0 {
		// An inline bucket only has the embedded page; any other id is a bug.
		if id != 0 {
			panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
		}
		if b.rootNode != nil {
			return nil, b.rootNode
		}
		return b.page, nil
	}

	// Check the node cache for non-inline buckets.
	if b.nodes != nil {
		if n := b.nodes[id]; n != nil {
			return nil, n
		}
	}

	// Finally lookup the page from the transaction if no node is materialized.
	return b.tx.page(id), nil
}
|
||||||
|
|
||||||
|
// BucketStats records statistics about resources used by a bucket.
type BucketStats struct {
	// Page count statistics.
	BranchPageN     int // number of logical branch pages
	BranchOverflowN int // number of physical branch overflow pages
	LeafPageN       int // number of logical leaf pages
	LeafOverflowN   int // number of physical leaf overflow pages

	// Tree statistics.
	KeyN  int // number of keys/value pairs
	Depth int // number of levels in B+tree

	// Page size utilization.
	BranchAlloc int // bytes allocated for physical branch pages
	BranchInuse int // bytes actually used for branch data
	LeafAlloc   int // bytes allocated for physical leaf pages
	LeafInuse   int // bytes actually used for leaf data

	// Bucket statistics
	BucketN           int // total number of buckets including the top bucket
	InlineBucketN     int // total number on inlined buckets
	InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
}

// Add merges the counters from other into s. Counts and byte totals are
// summed; Depth is combined by taking the maximum, since the deepest
// sub-tree determines the overall depth.
func (s *BucketStats) Add(other BucketStats) {
	if other.Depth > s.Depth {
		s.Depth = other.Depth
	}

	s.BranchPageN += other.BranchPageN
	s.BranchOverflowN += other.BranchOverflowN
	s.BranchInuse += other.BranchInuse
	s.BranchAlloc += other.BranchAlloc

	s.LeafPageN += other.LeafPageN
	s.LeafOverflowN += other.LeafOverflowN
	s.LeafInuse += other.LeafInuse
	s.LeafAlloc += other.LeafAlloc

	s.KeyN += other.KeyN
	s.BucketN += other.BucketN
	s.InlineBucketN += other.InlineBucketN
	s.InlineBucketInuse += other.InlineBucketInuse
}
|
||||||
|
|
||||||
|
// cloneBytes returns a newly allocated copy of v. The result shares no
// backing storage with the input, so it stays valid after the source
// buffer goes away; a nil input yields an empty (non-nil) slice.
func cloneBytes(v []byte) []byte {
	out := make([]byte, len(v))
	copy(out, v)
	return out
}
|
400
vendor/github.com/coreos/bbolt/cursor.go
generated
vendored
Normal file
400
vendor/github.com/coreos/bbolt/cursor.go
generated
vendored
Normal file
|
@ -0,0 +1,400 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
// Cursors see nested buckets with value == nil.
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
//
// Keys and values returned from the cursor are only valid for the life of the transaction.
//
// Changing data while traversing with a cursor may cause it to be invalidated
// and return unexpected keys and/or values. You must reposition your cursor
// after mutating data.
type Cursor struct {
	bucket *Bucket   // bucket this cursor iterates over
	stack  []elemRef // path of page/node refs from the root down to the current position
}
|
||||||
|
|
||||||
|
// Bucket returns the bucket that this cursor was created from.
// The returned value is the cursor's owning bucket, not a copy.
func (c *Cursor) Bucket() *Bucket {
	return c.bucket
}
|
||||||
|
|
||||||
|
// First moves the cursor to the first item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) First() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")
	c.stack = c.stack[:0]
	p, n := c.bucket.pageNode(c.bucket.root)
	c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
	c.first()

	// If we land on an empty page then move to the next value.
	// https://github.com/boltdb/bolt/issues/450
	if c.stack[len(c.stack)-1].count() == 0 {
		c.next()
	}

	k, v, flags := c.keyValue()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		// Nested buckets are surfaced with a nil value.
		return k, nil
	}
	return k, v
}
|
||||||
|
|
||||||
|
// Last moves the cursor to the last item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Last() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")
	c.stack = c.stack[:0]
	p, n := c.bucket.pageNode(c.bucket.root)
	ref := elemRef{page: p, node: n}
	// Position at the last element of the root, then descend to the last leaf.
	ref.index = ref.count() - 1
	c.stack = append(c.stack, ref)
	c.last()
	k, v, flags := c.keyValue()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		// Nested buckets are surfaced with a nil value.
		return k, nil
	}
	return k, v
}
|
||||||
|
|
||||||
|
// Next moves the cursor to the next item in the bucket and returns its key and value.
|
||||||
|
// If the cursor is at the end of the bucket then a nil key and value are returned.
|
||||||
|
// The returned key and value are only valid for the life of the transaction.
|
||||||
|
func (c *Cursor) Next() (key []byte, value []byte) {
|
||||||
|
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||||
|
k, v, flags := c.next()
|
||||||
|
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||||
|
return k, nil
|
||||||
|
}
|
||||||
|
return k, v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prev moves the cursor to the previous item in the bucket and returns its key and value.
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Prev() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")

	// Attempt to move back one element until we're successful.
	// Move up the stack as we hit the beginning of each page in our stack.
	for i := len(c.stack) - 1; i >= 0; i-- {
		elem := &c.stack[i]
		if elem.index > 0 {
			elem.index--
			break
		}
		c.stack = c.stack[:i]
	}

	// If we've hit the end then return nil.
	if len(c.stack) == 0 {
		return nil, nil
	}

	// Move down the stack to find the last element of the last leaf under this branch.
	c.last()
	k, v, flags := c.keyValue()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		// Nested buckets are surfaced with a nil value.
		return k, nil
	}
	return k, v
}
|
||||||
|
|
||||||
|
// Seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used. If no keys
// follow, a nil key is returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
	k, v, flags := c.seek(seek)

	// If we ended up after the last element of a page then move to the next one.
	if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
		k, v, flags = c.next()
	}

	if k == nil {
		return nil, nil
	} else if (flags & uint32(bucketLeafFlag)) != 0 {
		// Nested buckets are surfaced with a nil value.
		return k, nil
	}
	return k, v
}
|
||||||
|
|
||||||
|
// Delete removes the current key/value under the cursor from the bucket.
// Delete fails if current key/value is a bucket or if the transaction is not writable.
// Returns ErrTxClosed, ErrTxNotWritable or ErrIncompatibleValue accordingly.
func (c *Cursor) Delete() error {
	if c.bucket.tx.db == nil {
		return ErrTxClosed
	} else if !c.bucket.Writable() {
		return ErrTxNotWritable
	}

	key, _, flags := c.keyValue()
	// Return an error if current value is a bucket.
	if (flags & bucketLeafFlag) != 0 {
		return ErrIncompatibleValue
	}
	// Apply the deletion to the materialized node for this position.
	c.node().del(key)

	return nil
}
|
||||||
|
|
||||||
|
// seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used.
// Unlike Seek, this does not advance past the end of a page; the caller is
// expected to handle that case (see Seek and Bucket.spill).
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
	_assert(c.bucket.tx.db != nil, "tx closed")

	// Start from root page/node and traverse to correct page.
	c.stack = c.stack[:0]
	c.search(seek, c.bucket.root)
	ref := &c.stack[len(c.stack)-1]

	// If the cursor is pointing to the end of page/node then return nil.
	if ref.index >= ref.count() {
		return nil, nil, 0
	}

	// If this is a bucket then return a nil value.
	return c.keyValue()
}
|
||||||
|
|
||||||
|
// first moves the cursor to the first leaf element under the last page in the stack,
// descending through branch pages/nodes via their first element.
func (c *Cursor) first() {
	for {
		// Exit when we hit a leaf page.
		var ref = &c.stack[len(c.stack)-1]
		if ref.isLeaf() {
			break
		}

		// Keep adding pages pointing to the first element to the stack.
		var pgid pgid
		if ref.node != nil {
			pgid = ref.node.inodes[ref.index].pgid
		} else {
			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
		}
		p, n := c.bucket.pageNode(pgid)
		c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
	}
}
|
||||||
|
|
||||||
|
// last moves the cursor to the last leaf element under the last page in the stack,
// descending through branch pages/nodes via their last element.
func (c *Cursor) last() {
	for {
		// Exit when we hit a leaf page.
		ref := &c.stack[len(c.stack)-1]
		if ref.isLeaf() {
			break
		}

		// Keep adding pages pointing to the last element in the stack.
		var pgid pgid
		if ref.node != nil {
			pgid = ref.node.inodes[ref.index].pgid
		} else {
			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
		}
		p, n := c.bucket.pageNode(pgid)

		var nextRef = elemRef{page: p, node: n}
		nextRef.index = nextRef.count() - 1
		c.stack = append(c.stack, nextRef)
	}
}
|
||||||
|
|
||||||
|
// next moves to the next leaf element and returns the key and value.
// If the cursor is at the last leaf element then it stays there and returns nil.
func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
	for {
		// Attempt to move over one element until we're successful.
		// Move up the stack as we hit the end of each page in our stack.
		var i int
		for i = len(c.stack) - 1; i >= 0; i-- {
			elem := &c.stack[i]
			if elem.index < elem.count()-1 {
				elem.index++
				break
			}
		}

		// If we've hit the root page then stop and return. This will leave the
		// cursor on the last element of the last page.
		if i == -1 {
			return nil, nil, 0
		}

		// Otherwise start from where we left off in the stack and find the
		// first element of the first leaf page.
		c.stack = c.stack[:i+1]
		c.first()

		// If this is an empty page then restart and move back up the stack.
		// https://github.com/boltdb/bolt/issues/450
		if c.stack[len(c.stack)-1].count() == 0 {
			continue
		}

		return c.keyValue()
	}
}
|
||||||
|
|
||||||
|
// search recursively performs a binary search against a given page/node until it finds a given key.
// Each visited page/node is pushed onto the cursor stack; the leaf position
// is finalized by nsearch.
func (c *Cursor) search(key []byte, pgid pgid) {
	p, n := c.bucket.pageNode(pgid)
	if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
		panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
	}
	e := elemRef{page: p, node: n}
	c.stack = append(c.stack, e)

	// If we're on a leaf page/node then find the specific node.
	if e.isLeaf() {
		c.nsearch(key)
		return
	}

	// Branch level: prefer the materialized node, else search the raw page.
	if n != nil {
		c.searchNode(key, n)
		return
	}
	c.searchPage(key, p)
}
|
||||||
|
|
||||||
|
// searchNode binary-searches a materialized branch node for key, records the
// chosen child index on the cursor stack, and recurses into that child.
func (c *Cursor) searchNode(key []byte, n *node) {
	var exact bool
	index := sort.Search(len(n.inodes), func(i int) bool {
		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
		ret := bytes.Compare(n.inodes[i].key, key)
		if ret == 0 {
			exact = true
		}
		return ret != -1
	})
	// Without an exact match, step back to the child whose range contains key.
	if !exact && index > 0 {
		index--
	}
	c.stack[len(c.stack)-1].index = index

	// Recursively search to the next page.
	c.search(key, n.inodes[index].pgid)
}
|
||||||
|
|
||||||
|
// searchPage binary-searches a raw branch page for key, records the chosen
// child index on the cursor stack, and recurses into that child.
func (c *Cursor) searchPage(key []byte, p *page) {
	// Binary search for the correct range.
	inodes := p.branchPageElements()

	var exact bool
	index := sort.Search(int(p.count), func(i int) bool {
		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
		ret := bytes.Compare(inodes[i].key(), key)
		if ret == 0 {
			exact = true
		}
		return ret != -1
	})
	// Without an exact match, step back to the child whose range contains key.
	if !exact && index > 0 {
		index--
	}
	c.stack[len(c.stack)-1].index = index

	// Recursively search to the next page.
	c.search(key, inodes[index].pgid)
}
|
||||||
|
|
||||||
|
// nsearch searches the leaf node on the top of the stack for a key and sets
// the stack entry's index to the first element >= key (possibly past the end).
func (c *Cursor) nsearch(key []byte) {
	e := &c.stack[len(c.stack)-1]
	p, n := e.page, e.node

	// If we have a node then search its inodes.
	if n != nil {
		index := sort.Search(len(n.inodes), func(i int) bool {
			return bytes.Compare(n.inodes[i].key, key) != -1
		})
		e.index = index
		return
	}

	// If we have a page then search its leaf elements.
	inodes := p.leafPageElements()
	index := sort.Search(int(p.count), func(i int) bool {
		return bytes.Compare(inodes[i].key(), key) != -1
	})
	e.index = index
}
|
||||||
|
|
||||||
|
// keyValue returns the key and value of the current leaf element,
// plus its flags (e.g. bucketLeafFlag for nested buckets). Returns
// (nil, nil, 0) when the cursor points past the end or at an empty page.
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
	ref := &c.stack[len(c.stack)-1]
	if ref.count() == 0 || ref.index >= ref.count() {
		return nil, nil, 0
	}

	// Retrieve value from node.
	if ref.node != nil {
		inode := &ref.node.inodes[ref.index]
		return inode.key, inode.value, inode.flags
	}

	// Or retrieve value from page.
	elem := ref.page.leafPageElement(uint16(ref.index))
	return elem.key(), elem.value(), elem.flags
}
|
||||||
|
|
||||||
|
// node returns the node that the cursor is currently positioned on,
// materializing nodes along the cursor's path as needed so the result
// can be mutated by the write transaction.
func (c *Cursor) node() *node {
	_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")

	// If the top of the stack is a leaf node then just return it.
	if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
		return ref.node
	}

	// Start from root and traverse down the hierarchy.
	var n = c.stack[0].node
	if n == nil {
		n = c.bucket.node(c.stack[0].page.id, nil)
	}
	for _, ref := range c.stack[:len(c.stack)-1] {
		_assert(!n.isLeaf, "expected branch node")
		n = n.childAt(int(ref.index))
	}
	_assert(n.isLeaf, "expected leaf node")
	return n
}
|
||||||
|
|
||||||
|
// elemRef represents a reference to an element on a given page/node.
// At most one of page/node is set; node takes precedence when present.
type elemRef struct {
	page  *page // raw page backing this level, if no node is materialized
	node  *node // materialized node for this level, if any
	index int   // element index the cursor points at within the page/node
}
|
||||||
|
|
||||||
|
// isLeaf returns whether the ref is pointing at a leaf page/node.
|
||||||
|
func (r *elemRef) isLeaf() bool {
|
||||||
|
if r.node != nil {
|
||||||
|
return r.node.isLeaf
|
||||||
|
}
|
||||||
|
return (r.page.flags & leafPageFlag) != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// count returns the number of inodes or page elements.
|
||||||
|
func (r *elemRef) count() int {
|
||||||
|
if r.node != nil {
|
||||||
|
return len(r.node.inodes)
|
||||||
|
}
|
||||||
|
return int(r.page.count)
|
||||||
|
}
|
1137
vendor/github.com/coreos/bbolt/db.go
generated
vendored
Normal file
1137
vendor/github.com/coreos/bbolt/db.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
44
vendor/github.com/coreos/bbolt/doc.go
generated
vendored
Normal file
44
vendor/github.com/coreos/bbolt/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,44 @@
|
||||||
|
/*
|
||||||
|
Package bolt implements a low-level key/value store in pure Go. It supports
|
||||||
|
fully serializable transactions, ACID semantics, and lock-free MVCC with
|
||||||
|
multiple readers and a single writer. Bolt can be used for projects that
|
||||||
|
want a simple data store without the need to add large dependencies such as
|
||||||
|
Postgres or MySQL.
|
||||||
|
|
||||||
|
Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
|
||||||
|
optimized for fast read access and does not require recovery in the event of a
|
||||||
|
system crash. Transactions which have not finished committing will simply be
|
||||||
|
rolled back in the event of a crash.
|
||||||
|
|
||||||
|
The design of Bolt is based on Howard Chu's LMDB database project.
|
||||||
|
|
||||||
|
Bolt currently works on Windows, Mac OS X, and Linux.
|
||||||
|
|
||||||
|
|
||||||
|
Basics
|
||||||
|
|
||||||
|
There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
|
||||||
|
a collection of buckets and is represented by a single file on disk. A bucket is
|
||||||
|
a collection of unique keys that are associated with values.
|
||||||
|
|
||||||
|
Transactions provide either read-only or read-write access to the database.
|
||||||
|
Read-only transactions can retrieve key/value pairs and can use Cursors to
|
||||||
|
iterate over the dataset sequentially. Read-write transactions can create and
|
||||||
|
delete buckets and can insert and remove keys. Only one read-write transaction
|
||||||
|
is allowed at a time.
|
||||||
|
|
||||||
|
|
||||||
|
Caveats
|
||||||
|
|
||||||
|
The database uses a read-only, memory-mapped data file to ensure that
|
||||||
|
applications cannot corrupt the database, however, this means that keys and
|
||||||
|
values returned from Bolt cannot be changed. Writing to a read-only byte slice
|
||||||
|
will cause Go to panic.
|
||||||
|
|
||||||
|
Keys and values retrieved from the database are only valid for the life of
|
||||||
|
the transaction. When used outside the transaction, these byte slices can
|
||||||
|
point to different data or can point to invalid memory which will cause a panic.
|
||||||
|
|
||||||
|
|
||||||
|
*/
|
||||||
|
package bolt
|
71
vendor/github.com/coreos/bbolt/errors.go
generated
vendored
Normal file
71
vendor/github.com/coreos/bbolt/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,71 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
// These errors can be returned when opening or calling methods on a DB.
var (
	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
	// is opened or after it is closed.
	ErrDatabaseNotOpen = errors.New("database not open")

	// ErrDatabaseOpen is returned when opening a database that is
	// already open.
	ErrDatabaseOpen = errors.New("database already open")

	// ErrInvalid is returned when both meta pages on a database are invalid.
	// This typically occurs when a file is not a bolt database.
	ErrInvalid = errors.New("invalid database")

	// ErrVersionMismatch is returned when the data file was created with a
	// different version of Bolt.
	ErrVersionMismatch = errors.New("version mismatch")

	// ErrChecksum is returned when either meta page's checksum does not match.
	ErrChecksum = errors.New("checksum error")

	// ErrTimeout is returned when a database cannot obtain an exclusive lock
	// on the data file after the timeout passed to Open().
	ErrTimeout = errors.New("timeout")
)
|
||||||
|
|
||||||
|
// These errors can occur when beginning or committing a Tx.
var (
	// ErrTxNotWritable is returned when performing a write operation on a
	// read-only transaction.
	ErrTxNotWritable = errors.New("tx not writable")

	// ErrTxClosed is returned when committing or rolling back a transaction
	// that has already been committed or rolled back.
	ErrTxClosed = errors.New("tx closed")

	// ErrDatabaseReadOnly is returned when a mutating transaction is started
	// on a read-only database.
	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
)
|
||||||
|
|
||||||
|
// These errors can occur when putting or deleting a value or a bucket.
|
||||||
|
var (
|
||||||
|
// ErrBucketNotFound is returned when trying to access a bucket that has
|
||||||
|
// not been created yet.
|
||||||
|
ErrBucketNotFound = errors.New("bucket not found")
|
||||||
|
|
||||||
|
// ErrBucketExists is returned when creating a bucket that already exists.
|
||||||
|
ErrBucketExists = errors.New("bucket already exists")
|
||||||
|
|
||||||
|
// ErrBucketNameRequired is returned when creating a bucket with a blank name.
|
||||||
|
ErrBucketNameRequired = errors.New("bucket name required")
|
||||||
|
|
||||||
|
// ErrKeyRequired is returned when inserting a zero-length key.
|
||||||
|
ErrKeyRequired = errors.New("key required")
|
||||||
|
|
||||||
|
// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
|
||||||
|
ErrKeyTooLarge = errors.New("key too large")
|
||||||
|
|
||||||
|
// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
|
||||||
|
ErrValueTooLarge = errors.New("value too large")
|
||||||
|
|
||||||
|
// ErrIncompatibleValue is returned when trying create or delete a bucket
|
||||||
|
// on an existing non-bucket key or when trying to create or delete a
|
||||||
|
// non-bucket key on an existing bucket key.
|
||||||
|
ErrIncompatibleValue = errors.New("incompatible value")
|
||||||
|
)
|
330
vendor/github.com/coreos/bbolt/freelist.go
generated
vendored
Normal file
330
vendor/github.com/coreos/bbolt/freelist.go
generated
vendored
Normal file
|
@ -0,0 +1,330 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// txPending holds a list of pgids and corresponding allocation txns
|
||||||
|
// that are pending to be freed.
|
||||||
|
type txPending struct {
|
||||||
|
ids []pgid
|
||||||
|
alloctx []txid // txids allocating the ids
|
||||||
|
lastReleaseBegin txid // beginning txid of last matching releaseRange
|
||||||
|
}
|
||||||
|
|
||||||
|
// freelist represents a list of all pages that are available for allocation.
|
||||||
|
// It also tracks pages that have been freed but are still in use by open transactions.
|
||||||
|
type freelist struct {
|
||||||
|
ids []pgid // all free and available free page ids.
|
||||||
|
allocs map[pgid]txid // mapping of txid that allocated a pgid.
|
||||||
|
pending map[txid]*txPending // mapping of soon-to-be free page ids by tx.
|
||||||
|
cache map[pgid]bool // fast lookup of all free and pending page ids.
|
||||||
|
}
|
||||||
|
|
||||||
|
// newFreelist returns an empty, initialized freelist.
|
||||||
|
func newFreelist() *freelist {
|
||||||
|
return &freelist{
|
||||||
|
allocs: make(map[pgid]txid),
|
||||||
|
pending: make(map[txid]*txPending),
|
||||||
|
cache: make(map[pgid]bool),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// size returns the size of the page after serialization.
|
||||||
|
func (f *freelist) size() int {
|
||||||
|
n := f.count()
|
||||||
|
if n >= 0xFFFF {
|
||||||
|
// The first element will be used to store the count. See freelist.write.
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// count returns count of pages on the freelist
|
||||||
|
func (f *freelist) count() int {
|
||||||
|
return f.free_count() + f.pending_count()
|
||||||
|
}
|
||||||
|
|
||||||
|
// free_count returns count of free pages
|
||||||
|
func (f *freelist) free_count() int {
|
||||||
|
return len(f.ids)
|
||||||
|
}
|
||||||
|
|
||||||
|
// pending_count returns count of pending pages
|
||||||
|
func (f *freelist) pending_count() int {
|
||||||
|
var count int
|
||||||
|
for _, txp := range f.pending {
|
||||||
|
count += len(txp.ids)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
|
||||||
|
// f.count returns the minimum length required for dst.
|
||||||
|
func (f *freelist) copyall(dst []pgid) {
|
||||||
|
m := make(pgids, 0, f.pending_count())
|
||||||
|
for _, txp := range f.pending {
|
||||||
|
m = append(m, txp.ids...)
|
||||||
|
}
|
||||||
|
sort.Sort(m)
|
||||||
|
mergepgids(dst, f.ids, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// allocate returns the starting page id of a contiguous list of pages of a given size.
|
||||||
|
// If a contiguous block cannot be found then 0 is returned.
|
||||||
|
func (f *freelist) allocate(txid txid, n int) pgid {
|
||||||
|
if len(f.ids) == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
var initial, previd pgid
|
||||||
|
for i, id := range f.ids {
|
||||||
|
if id <= 1 {
|
||||||
|
panic(fmt.Sprintf("invalid page allocation: %d", id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset initial page if this is not contiguous.
|
||||||
|
if previd == 0 || id-previd != 1 {
|
||||||
|
initial = id
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we found a contiguous block then remove it and return it.
|
||||||
|
if (id-initial)+1 == pgid(n) {
|
||||||
|
// If we're allocating off the beginning then take the fast path
|
||||||
|
// and just adjust the existing slice. This will use extra memory
|
||||||
|
// temporarily but the append() in free() will realloc the slice
|
||||||
|
// as is necessary.
|
||||||
|
if (i + 1) == n {
|
||||||
|
f.ids = f.ids[i+1:]
|
||||||
|
} else {
|
||||||
|
copy(f.ids[i-n+1:], f.ids[i+1:])
|
||||||
|
f.ids = f.ids[:len(f.ids)-n]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove from the free cache.
|
||||||
|
for i := pgid(0); i < pgid(n); i++ {
|
||||||
|
delete(f.cache, initial+i)
|
||||||
|
}
|
||||||
|
f.allocs[initial] = txid
|
||||||
|
return initial
|
||||||
|
}
|
||||||
|
|
||||||
|
previd = id
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// free releases a page and its overflow for a given transaction id.
|
||||||
|
// If the page is already free then a panic will occur.
|
||||||
|
func (f *freelist) free(txid txid, p *page) {
|
||||||
|
if p.id <= 1 {
|
||||||
|
panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Free page and all its overflow pages.
|
||||||
|
txp := f.pending[txid]
|
||||||
|
if txp == nil {
|
||||||
|
txp = &txPending{}
|
||||||
|
f.pending[txid] = txp
|
||||||
|
}
|
||||||
|
allocTxid, ok := f.allocs[p.id]
|
||||||
|
if ok {
|
||||||
|
delete(f.allocs, p.id)
|
||||||
|
} else if (p.flags & (freelistPageFlag | metaPageFlag)) != 0 {
|
||||||
|
// Safe to claim txid as allocating since these types are private to txid.
|
||||||
|
allocTxid = txid
|
||||||
|
}
|
||||||
|
|
||||||
|
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
|
||||||
|
// Verify that page is not already free.
|
||||||
|
if f.cache[id] {
|
||||||
|
panic(fmt.Sprintf("page %d already freed", id))
|
||||||
|
}
|
||||||
|
// Add to the freelist and cache.
|
||||||
|
txp.ids = append(txp.ids, id)
|
||||||
|
txp.alloctx = append(txp.alloctx, allocTxid)
|
||||||
|
f.cache[id] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// release moves all page ids for a transaction id (or older) to the freelist.
|
||||||
|
func (f *freelist) release(txid txid) {
|
||||||
|
m := make(pgids, 0)
|
||||||
|
for tid, txp := range f.pending {
|
||||||
|
if tid <= txid {
|
||||||
|
// Move transaction's pending pages to the available freelist.
|
||||||
|
// Don't remove from the cache since the page is still free.
|
||||||
|
m = append(m, txp.ids...)
|
||||||
|
delete(f.pending, tid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Sort(m)
|
||||||
|
f.ids = pgids(f.ids).merge(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
|
||||||
|
func (f *freelist) releaseRange(begin, end txid) {
|
||||||
|
if begin > end {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var m pgids
|
||||||
|
for tid, txp := range f.pending {
|
||||||
|
if tid < begin || tid > end {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Don't recompute freed pages if ranges haven't updated.
|
||||||
|
if txp.lastReleaseBegin == begin {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for i := 0; i < len(txp.ids); i++ {
|
||||||
|
if atx := txp.alloctx[i]; atx < begin || atx > end {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
m = append(m, txp.ids[i])
|
||||||
|
txp.ids[i] = txp.ids[len(txp.ids)-1]
|
||||||
|
txp.ids = txp.ids[:len(txp.ids)-1]
|
||||||
|
txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
|
||||||
|
txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
txp.lastReleaseBegin = begin
|
||||||
|
if len(txp.ids) == 0 {
|
||||||
|
delete(f.pending, tid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Sort(m)
|
||||||
|
f.ids = pgids(f.ids).merge(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// rollback removes the pages from a given pending tx.
|
||||||
|
func (f *freelist) rollback(txid txid) {
|
||||||
|
// Remove page ids from cache.
|
||||||
|
txp := f.pending[txid]
|
||||||
|
if txp == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var m pgids
|
||||||
|
for i, pgid := range txp.ids {
|
||||||
|
delete(f.cache, pgid)
|
||||||
|
tx := txp.alloctx[i]
|
||||||
|
if tx == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if tx != txid {
|
||||||
|
// Pending free aborted; restore page back to alloc list.
|
||||||
|
f.allocs[pgid] = tx
|
||||||
|
} else {
|
||||||
|
// Freed page was allocated by this txn; OK to throw away.
|
||||||
|
m = append(m, pgid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Remove pages from pending list and mark as free if allocated by txid.
|
||||||
|
delete(f.pending, txid)
|
||||||
|
sort.Sort(m)
|
||||||
|
f.ids = pgids(f.ids).merge(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// freed returns whether a given page is in the free list.
|
||||||
|
func (f *freelist) freed(pgid pgid) bool {
|
||||||
|
return f.cache[pgid]
|
||||||
|
}
|
||||||
|
|
||||||
|
// read initializes the freelist from a freelist page.
|
||||||
|
func (f *freelist) read(p *page) {
|
||||||
|
// If the page.count is at the max uint16 value (64k) then it's considered
|
||||||
|
// an overflow and the size of the freelist is stored as the first element.
|
||||||
|
idx, count := 0, int(p.count)
|
||||||
|
if count == 0xFFFF {
|
||||||
|
idx = 1
|
||||||
|
count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy the list of page ids from the freelist.
|
||||||
|
if count == 0 {
|
||||||
|
f.ids = nil
|
||||||
|
} else {
|
||||||
|
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
|
||||||
|
f.ids = make([]pgid, len(ids))
|
||||||
|
copy(f.ids, ids)
|
||||||
|
|
||||||
|
// Make sure they're sorted.
|
||||||
|
sort.Sort(pgids(f.ids))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rebuild the page cache.
|
||||||
|
f.reindex()
|
||||||
|
}
|
||||||
|
|
||||||
|
// read initializes the freelist from a given list of ids.
|
||||||
|
func (f *freelist) readIDs(ids []pgid) {
|
||||||
|
f.ids = ids
|
||||||
|
f.reindex()
|
||||||
|
}
|
||||||
|
|
||||||
|
// write writes the page ids onto a freelist page. All free and pending ids are
|
||||||
|
// saved to disk since in the event of a program crash, all pending ids will
|
||||||
|
// become free.
|
||||||
|
func (f *freelist) write(p *page) error {
|
||||||
|
// Combine the old free pgids and pgids waiting on an open transaction.
|
||||||
|
|
||||||
|
// Update the header flag.
|
||||||
|
p.flags |= freelistPageFlag
|
||||||
|
|
||||||
|
// The page.count can only hold up to 64k elements so if we overflow that
|
||||||
|
// number then we handle it by putting the size in the first element.
|
||||||
|
lenids := f.count()
|
||||||
|
if lenids == 0 {
|
||||||
|
p.count = uint16(lenids)
|
||||||
|
} else if lenids < 0xFFFF {
|
||||||
|
p.count = uint16(lenids)
|
||||||
|
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
|
||||||
|
} else {
|
||||||
|
p.count = 0xFFFF
|
||||||
|
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
|
||||||
|
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// reload reads the freelist from a page and filters out pending items.
|
||||||
|
func (f *freelist) reload(p *page) {
|
||||||
|
f.read(p)
|
||||||
|
|
||||||
|
// Build a cache of only pending pages.
|
||||||
|
pcache := make(map[pgid]bool)
|
||||||
|
for _, txp := range f.pending {
|
||||||
|
for _, pendingID := range txp.ids {
|
||||||
|
pcache[pendingID] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check each page in the freelist and build a new available freelist
|
||||||
|
// with any pages not in the pending lists.
|
||||||
|
var a []pgid
|
||||||
|
for _, id := range f.ids {
|
||||||
|
if !pcache[id] {
|
||||||
|
a = append(a, id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f.ids = a
|
||||||
|
|
||||||
|
// Once the available list is rebuilt then rebuild the free cache so that
|
||||||
|
// it includes the available and pending free pages.
|
||||||
|
f.reindex()
|
||||||
|
}
|
||||||
|
|
||||||
|
// reindex rebuilds the free cache based on available and pending free lists.
|
||||||
|
func (f *freelist) reindex() {
|
||||||
|
f.cache = make(map[pgid]bool, len(f.ids))
|
||||||
|
for _, id := range f.ids {
|
||||||
|
f.cache[id] = true
|
||||||
|
}
|
||||||
|
for _, txp := range f.pending {
|
||||||
|
for _, pendingID := range txp.ids {
|
||||||
|
f.cache[pendingID] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
604
vendor/github.com/coreos/bbolt/node.go
generated
vendored
Normal file
604
vendor/github.com/coreos/bbolt/node.go
generated
vendored
Normal file
|
@ -0,0 +1,604 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// node represents an in-memory, deserialized page.
|
||||||
|
type node struct {
|
||||||
|
bucket *Bucket
|
||||||
|
isLeaf bool
|
||||||
|
unbalanced bool
|
||||||
|
spilled bool
|
||||||
|
key []byte
|
||||||
|
pgid pgid
|
||||||
|
parent *node
|
||||||
|
children nodes
|
||||||
|
inodes inodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// root returns the top-level node this node is attached to.
|
||||||
|
func (n *node) root() *node {
|
||||||
|
if n.parent == nil {
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
return n.parent.root()
|
||||||
|
}
|
||||||
|
|
||||||
|
// minKeys returns the minimum number of inodes this node should have.
|
||||||
|
func (n *node) minKeys() int {
|
||||||
|
if n.isLeaf {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// size returns the size of the node after serialization.
|
||||||
|
func (n *node) size() int {
|
||||||
|
sz, elsz := pageHeaderSize, n.pageElementSize()
|
||||||
|
for i := 0; i < len(n.inodes); i++ {
|
||||||
|
item := &n.inodes[i]
|
||||||
|
sz += elsz + len(item.key) + len(item.value)
|
||||||
|
}
|
||||||
|
return sz
|
||||||
|
}
|
||||||
|
|
||||||
|
// sizeLessThan returns true if the node is less than a given size.
|
||||||
|
// This is an optimization to avoid calculating a large node when we only need
|
||||||
|
// to know if it fits inside a certain page size.
|
||||||
|
func (n *node) sizeLessThan(v int) bool {
|
||||||
|
sz, elsz := pageHeaderSize, n.pageElementSize()
|
||||||
|
for i := 0; i < len(n.inodes); i++ {
|
||||||
|
item := &n.inodes[i]
|
||||||
|
sz += elsz + len(item.key) + len(item.value)
|
||||||
|
if sz >= v {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// pageElementSize returns the size of each page element based on the type of node.
|
||||||
|
func (n *node) pageElementSize() int {
|
||||||
|
if n.isLeaf {
|
||||||
|
return leafPageElementSize
|
||||||
|
}
|
||||||
|
return branchPageElementSize
|
||||||
|
}
|
||||||
|
|
||||||
|
// childAt returns the child node at a given index.
|
||||||
|
func (n *node) childAt(index int) *node {
|
||||||
|
if n.isLeaf {
|
||||||
|
panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
|
||||||
|
}
|
||||||
|
return n.bucket.node(n.inodes[index].pgid, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// childIndex returns the index of a given child node.
|
||||||
|
func (n *node) childIndex(child *node) int {
|
||||||
|
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
|
||||||
|
return index
|
||||||
|
}
|
||||||
|
|
||||||
|
// numChildren returns the number of children.
|
||||||
|
func (n *node) numChildren() int {
|
||||||
|
return len(n.inodes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// nextSibling returns the next node with the same parent.
|
||||||
|
func (n *node) nextSibling() *node {
|
||||||
|
if n.parent == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
index := n.parent.childIndex(n)
|
||||||
|
if index >= n.parent.numChildren()-1 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return n.parent.childAt(index + 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// prevSibling returns the previous node with the same parent.
|
||||||
|
func (n *node) prevSibling() *node {
|
||||||
|
if n.parent == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
index := n.parent.childIndex(n)
|
||||||
|
if index == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return n.parent.childAt(index - 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// put inserts a key/value.
|
||||||
|
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
|
||||||
|
if pgid >= n.bucket.tx.meta.pgid {
|
||||||
|
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
|
||||||
|
} else if len(oldKey) <= 0 {
|
||||||
|
panic("put: zero-length old key")
|
||||||
|
} else if len(newKey) <= 0 {
|
||||||
|
panic("put: zero-length new key")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find insertion index.
|
||||||
|
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
|
||||||
|
|
||||||
|
// Add capacity and shift nodes if we don't have an exact match and need to insert.
|
||||||
|
exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
|
||||||
|
if !exact {
|
||||||
|
n.inodes = append(n.inodes, inode{})
|
||||||
|
copy(n.inodes[index+1:], n.inodes[index:])
|
||||||
|
}
|
||||||
|
|
||||||
|
inode := &n.inodes[index]
|
||||||
|
inode.flags = flags
|
||||||
|
inode.key = newKey
|
||||||
|
inode.value = value
|
||||||
|
inode.pgid = pgid
|
||||||
|
_assert(len(inode.key) > 0, "put: zero-length inode key")
|
||||||
|
}
|
||||||
|
|
||||||
|
// del removes a key from the node.
|
||||||
|
func (n *node) del(key []byte) {
|
||||||
|
// Find index of key.
|
||||||
|
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
|
||||||
|
|
||||||
|
// Exit if the key isn't found.
|
||||||
|
if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete inode from the node.
|
||||||
|
n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
|
||||||
|
|
||||||
|
// Mark the node as needing rebalancing.
|
||||||
|
n.unbalanced = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// read initializes the node from a page.
|
||||||
|
func (n *node) read(p *page) {
|
||||||
|
n.pgid = p.id
|
||||||
|
n.isLeaf = ((p.flags & leafPageFlag) != 0)
|
||||||
|
n.inodes = make(inodes, int(p.count))
|
||||||
|
|
||||||
|
for i := 0; i < int(p.count); i++ {
|
||||||
|
inode := &n.inodes[i]
|
||||||
|
if n.isLeaf {
|
||||||
|
elem := p.leafPageElement(uint16(i))
|
||||||
|
inode.flags = elem.flags
|
||||||
|
inode.key = elem.key()
|
||||||
|
inode.value = elem.value()
|
||||||
|
} else {
|
||||||
|
elem := p.branchPageElement(uint16(i))
|
||||||
|
inode.pgid = elem.pgid
|
||||||
|
inode.key = elem.key()
|
||||||
|
}
|
||||||
|
_assert(len(inode.key) > 0, "read: zero-length inode key")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save first key so we can find the node in the parent when we spill.
|
||||||
|
if len(n.inodes) > 0 {
|
||||||
|
n.key = n.inodes[0].key
|
||||||
|
_assert(len(n.key) > 0, "read: zero-length node key")
|
||||||
|
} else {
|
||||||
|
n.key = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// write writes the items onto one or more pages.
|
||||||
|
func (n *node) write(p *page) {
|
||||||
|
// Initialize page.
|
||||||
|
if n.isLeaf {
|
||||||
|
p.flags |= leafPageFlag
|
||||||
|
} else {
|
||||||
|
p.flags |= branchPageFlag
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(n.inodes) >= 0xFFFF {
|
||||||
|
panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
|
||||||
|
}
|
||||||
|
p.count = uint16(len(n.inodes))
|
||||||
|
|
||||||
|
// Stop here if there are no items to write.
|
||||||
|
if p.count == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Loop over each item and write it to the page.
|
||||||
|
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
|
||||||
|
for i, item := range n.inodes {
|
||||||
|
_assert(len(item.key) > 0, "write: zero-length inode key")
|
||||||
|
|
||||||
|
// Write the page element.
|
||||||
|
if n.isLeaf {
|
||||||
|
elem := p.leafPageElement(uint16(i))
|
||||||
|
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
|
||||||
|
elem.flags = item.flags
|
||||||
|
elem.ksize = uint32(len(item.key))
|
||||||
|
elem.vsize = uint32(len(item.value))
|
||||||
|
} else {
|
||||||
|
elem := p.branchPageElement(uint16(i))
|
||||||
|
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
|
||||||
|
elem.ksize = uint32(len(item.key))
|
||||||
|
elem.pgid = item.pgid
|
||||||
|
_assert(elem.pgid != p.id, "write: circular dependency occurred")
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the length of key+value is larger than the max allocation size
|
||||||
|
// then we need to reallocate the byte array pointer.
|
||||||
|
//
|
||||||
|
// See: https://github.com/boltdb/bolt/pull/335
|
||||||
|
klen, vlen := len(item.key), len(item.value)
|
||||||
|
if len(b) < klen+vlen {
|
||||||
|
b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write data for the element to the end of the page.
|
||||||
|
copy(b[0:], item.key)
|
||||||
|
b = b[klen:]
|
||||||
|
copy(b[0:], item.value)
|
||||||
|
b = b[vlen:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// DEBUG ONLY: n.dump()
|
||||||
|
}
|
||||||
|
|
||||||
|
// split breaks up a node into multiple smaller nodes, if appropriate.
|
||||||
|
// This should only be called from the spill() function.
|
||||||
|
func (n *node) split(pageSize int) []*node {
|
||||||
|
var nodes []*node
|
||||||
|
|
||||||
|
node := n
|
||||||
|
for {
|
||||||
|
// Split node into two.
|
||||||
|
a, b := node.splitTwo(pageSize)
|
||||||
|
nodes = append(nodes, a)
|
||||||
|
|
||||||
|
// If we can't split then exit the loop.
|
||||||
|
if b == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set node to b so it gets split on the next iteration.
|
||||||
|
node = b
|
||||||
|
}
|
||||||
|
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// splitTwo breaks up a node into two smaller nodes, if appropriate.
|
||||||
|
// This should only be called from the split() function.
|
||||||
|
func (n *node) splitTwo(pageSize int) (*node, *node) {
|
||||||
|
// Ignore the split if the page doesn't have at least enough nodes for
|
||||||
|
// two pages or if the nodes can fit in a single page.
|
||||||
|
if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine the threshold before starting a new node.
|
||||||
|
var fillPercent = n.bucket.FillPercent
|
||||||
|
if fillPercent < minFillPercent {
|
||||||
|
fillPercent = minFillPercent
|
||||||
|
} else if fillPercent > maxFillPercent {
|
||||||
|
fillPercent = maxFillPercent
|
||||||
|
}
|
||||||
|
threshold := int(float64(pageSize) * fillPercent)
|
||||||
|
|
||||||
|
// Determine split position and sizes of the two pages.
|
||||||
|
splitIndex, _ := n.splitIndex(threshold)
|
||||||
|
|
||||||
|
// Split node into two separate nodes.
|
||||||
|
// If there's no parent then we'll need to create one.
|
||||||
|
if n.parent == nil {
|
||||||
|
n.parent = &node{bucket: n.bucket, children: []*node{n}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new node and add it to the parent.
|
||||||
|
next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
|
||||||
|
n.parent.children = append(n.parent.children, next)
|
||||||
|
|
||||||
|
// Split inodes across two nodes.
|
||||||
|
next.inodes = n.inodes[splitIndex:]
|
||||||
|
n.inodes = n.inodes[:splitIndex]
|
||||||
|
|
||||||
|
// Update the statistics.
|
||||||
|
n.bucket.tx.stats.Split++
|
||||||
|
|
||||||
|
return n, next
|
||||||
|
}
|
||||||
|
|
||||||
|
// splitIndex finds the position where a page will fill a given threshold.
|
||||||
|
// It returns the index as well as the size of the first page.
|
||||||
|
// This is only be called from split().
|
||||||
|
func (n *node) splitIndex(threshold int) (index, sz int) {
|
||||||
|
sz = pageHeaderSize
|
||||||
|
|
||||||
|
// Loop until we only have the minimum number of keys required for the second page.
|
||||||
|
for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
|
||||||
|
index = i
|
||||||
|
inode := n.inodes[i]
|
||||||
|
elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
|
||||||
|
|
||||||
|
// If we have at least the minimum number of keys and adding another
|
||||||
|
// node would put us over the threshold then exit and return.
|
||||||
|
if i >= minKeysPerPage && sz+elsize > threshold {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add the element size to the total size.
|
||||||
|
sz += elsize
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// spill writes the nodes to dirty pages and splits nodes as it goes.
|
||||||
|
// Returns an error if dirty pages cannot be allocated.
|
||||||
|
func (n *node) spill() error {
|
||||||
|
var tx = n.bucket.tx
|
||||||
|
if n.spilled {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Spill child nodes first. Child nodes can materialize sibling nodes in
|
||||||
|
// the case of split-merge so we cannot use a range loop. We have to check
|
||||||
|
// the children size on every loop iteration.
|
||||||
|
sort.Sort(n.children)
|
||||||
|
for i := 0; i < len(n.children); i++ {
|
||||||
|
if err := n.children[i].spill(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We no longer need the child list because it's only used for spill tracking.
|
||||||
|
n.children = nil
|
||||||
|
|
||||||
|
// Split nodes into appropriate sizes. The first node will always be n.
|
||||||
|
var nodes = n.split(tx.db.pageSize)
|
||||||
|
for _, node := range nodes {
|
||||||
|
// Add node's page to the freelist if it's not new.
|
||||||
|
if node.pgid > 0 {
|
||||||
|
tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
|
||||||
|
node.pgid = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allocate contiguous space for the node.
|
||||||
|
p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write the node.
|
||||||
|
if p.id >= tx.meta.pgid {
|
||||||
|
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
|
||||||
|
}
|
||||||
|
node.pgid = p.id
|
||||||
|
node.write(p)
|
||||||
|
node.spilled = true
|
||||||
|
|
||||||
|
// Insert into parent inodes.
|
||||||
|
if node.parent != nil {
|
||||||
|
var key = node.key
|
||||||
|
if key == nil {
|
||||||
|
key = node.inodes[0].key
|
||||||
|
}
|
||||||
|
|
||||||
|
node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
|
||||||
|
node.key = node.inodes[0].key
|
||||||
|
_assert(len(node.key) > 0, "spill: zero-length node key")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the statistics.
|
||||||
|
tx.stats.Spill++
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the root node split and created a new root then we need to spill that
|
||||||
|
// as well. We'll clear out the children to make sure it doesn't try to respill.
|
||||||
|
if n.parent != nil && n.parent.pgid == 0 {
|
||||||
|
n.children = nil
|
||||||
|
return n.parent.spill()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// rebalance attempts to combine the node with sibling nodes if the node fill
|
||||||
|
// size is below a threshold or if there are not enough keys.
|
||||||
|
func (n *node) rebalance() {
|
||||||
|
if !n.unbalanced {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n.unbalanced = false
|
||||||
|
|
||||||
|
// Update statistics.
|
||||||
|
n.bucket.tx.stats.Rebalance++
|
||||||
|
|
||||||
|
// Ignore if node is above threshold (25%) and has enough keys.
|
||||||
|
var threshold = n.bucket.tx.db.pageSize / 4
|
||||||
|
if n.size() > threshold && len(n.inodes) > n.minKeys() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Root node has special handling.
|
||||||
|
if n.parent == nil {
|
||||||
|
// If root node is a branch and only has one node then collapse it.
|
||||||
|
if !n.isLeaf && len(n.inodes) == 1 {
|
||||||
|
// Move root's child up.
|
||||||
|
child := n.bucket.node(n.inodes[0].pgid, n)
|
||||||
|
n.isLeaf = child.isLeaf
|
||||||
|
n.inodes = child.inodes[:]
|
||||||
|
n.children = child.children
|
||||||
|
|
||||||
|
// Reparent all child nodes being moved.
|
||||||
|
for _, inode := range n.inodes {
|
||||||
|
if child, ok := n.bucket.nodes[inode.pgid]; ok {
|
||||||
|
child.parent = n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove old child.
|
||||||
|
child.parent = nil
|
||||||
|
delete(n.bucket.nodes, child.pgid)
|
||||||
|
child.free()
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// If node has no keys then just remove it.
|
||||||
|
if n.numChildren() == 0 {
|
||||||
|
n.parent.del(n.key)
|
||||||
|
n.parent.removeChild(n)
|
||||||
|
delete(n.bucket.nodes, n.pgid)
|
||||||
|
n.free()
|
||||||
|
n.parent.rebalance()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
|
||||||
|
|
||||||
|
// Destination node is right sibling if idx == 0, otherwise left sibling.
|
||||||
|
var target *node
|
||||||
|
var useNextSibling = (n.parent.childIndex(n) == 0)
|
||||||
|
if useNextSibling {
|
||||||
|
target = n.nextSibling()
|
||||||
|
} else {
|
||||||
|
target = n.prevSibling()
|
||||||
|
}
|
||||||
|
|
||||||
|
// If both this node and the target node are too small then merge them.
|
||||||
|
if useNextSibling {
|
||||||
|
// Reparent all child nodes being moved.
|
||||||
|
for _, inode := range target.inodes {
|
||||||
|
if child, ok := n.bucket.nodes[inode.pgid]; ok {
|
||||||
|
child.parent.removeChild(child)
|
||||||
|
child.parent = n
|
||||||
|
child.parent.children = append(child.parent.children, child)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy over inodes from target and remove target.
|
||||||
|
n.inodes = append(n.inodes, target.inodes...)
|
||||||
|
n.parent.del(target.key)
|
||||||
|
n.parent.removeChild(target)
|
||||||
|
delete(n.bucket.nodes, target.pgid)
|
||||||
|
target.free()
|
||||||
|
} else {
|
||||||
|
// Reparent all child nodes being moved.
|
||||||
|
for _, inode := range n.inodes {
|
||||||
|
if child, ok := n.bucket.nodes[inode.pgid]; ok {
|
||||||
|
child.parent.removeChild(child)
|
||||||
|
child.parent = target
|
||||||
|
child.parent.children = append(child.parent.children, child)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy over inodes to target and remove node.
|
||||||
|
target.inodes = append(target.inodes, n.inodes...)
|
||||||
|
n.parent.del(n.key)
|
||||||
|
n.parent.removeChild(n)
|
||||||
|
delete(n.bucket.nodes, n.pgid)
|
||||||
|
n.free()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Either this node or the target node was deleted from the parent so rebalance it.
|
||||||
|
n.parent.rebalance()
|
||||||
|
}
|
||||||
|
|
||||||
|
// removes a node from the list of in-memory children.
|
||||||
|
// This does not affect the inodes.
|
||||||
|
func (n *node) removeChild(target *node) {
|
||||||
|
for i, child := range n.children {
|
||||||
|
if child == target {
|
||||||
|
n.children = append(n.children[:i], n.children[i+1:]...)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// dereference causes the node to copy all its inode key/value references to heap memory.
|
||||||
|
// This is required when the mmap is reallocated so inodes are not pointing to stale data.
|
||||||
|
func (n *node) dereference() {
|
||||||
|
if n.key != nil {
|
||||||
|
key := make([]byte, len(n.key))
|
||||||
|
copy(key, n.key)
|
||||||
|
n.key = key
|
||||||
|
_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range n.inodes {
|
||||||
|
inode := &n.inodes[i]
|
||||||
|
|
||||||
|
key := make([]byte, len(inode.key))
|
||||||
|
copy(key, inode.key)
|
||||||
|
inode.key = key
|
||||||
|
_assert(len(inode.key) > 0, "dereference: zero-length inode key")
|
||||||
|
|
||||||
|
value := make([]byte, len(inode.value))
|
||||||
|
copy(value, inode.value)
|
||||||
|
inode.value = value
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursively dereference children.
|
||||||
|
for _, child := range n.children {
|
||||||
|
child.dereference()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update statistics.
|
||||||
|
n.bucket.tx.stats.NodeDeref++
|
||||||
|
}
|
||||||
|
|
||||||
|
// free adds the node's underlying page to the freelist.
|
||||||
|
func (n *node) free() {
|
||||||
|
if n.pgid != 0 {
|
||||||
|
n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
|
||||||
|
n.pgid = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// dump writes the contents of the node to STDERR for debugging purposes.
|
||||||
|
/*
|
||||||
|
func (n *node) dump() {
|
||||||
|
// Write node header.
|
||||||
|
var typ = "branch"
|
||||||
|
if n.isLeaf {
|
||||||
|
typ = "leaf"
|
||||||
|
}
|
||||||
|
warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
|
||||||
|
|
||||||
|
// Write out abbreviated version of each item.
|
||||||
|
for _, item := range n.inodes {
|
||||||
|
if n.isLeaf {
|
||||||
|
if item.flags&bucketLeafFlag != 0 {
|
||||||
|
bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
|
||||||
|
warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
|
||||||
|
} else {
|
||||||
|
warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
warn("")
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
|
type nodes []*node
|
||||||
|
|
||||||
|
func (s nodes) Len() int { return len(s) }
|
||||||
|
func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
|
||||||
|
|
||||||
|
// inode represents an internal node inside of a node.
|
||||||
|
// It can be used to point to elements in a page or point
|
||||||
|
// to an element which hasn't been added to a page yet.
|
||||||
|
type inode struct {
|
||||||
|
flags uint32
|
||||||
|
pgid pgid
|
||||||
|
key []byte
|
||||||
|
value []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type inodes []inode
|
197
vendor/github.com/coreos/bbolt/page.go
generated
vendored
Normal file
197
vendor/github.com/coreos/bbolt/page.go
generated
vendored
Normal file
|
@ -0,0 +1,197 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
|
||||||
|
|
||||||
|
const minKeysPerPage = 2
|
||||||
|
|
||||||
|
const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
|
||||||
|
const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
|
||||||
|
|
||||||
|
const (
|
||||||
|
branchPageFlag = 0x01
|
||||||
|
leafPageFlag = 0x02
|
||||||
|
metaPageFlag = 0x04
|
||||||
|
freelistPageFlag = 0x10
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
bucketLeafFlag = 0x01
|
||||||
|
)
|
||||||
|
|
||||||
|
type pgid uint64
|
||||||
|
|
||||||
|
type page struct {
|
||||||
|
id pgid
|
||||||
|
flags uint16
|
||||||
|
count uint16
|
||||||
|
overflow uint32
|
||||||
|
ptr uintptr
|
||||||
|
}
|
||||||
|
|
||||||
|
// typ returns a human readable page type string used for debugging.
|
||||||
|
func (p *page) typ() string {
|
||||||
|
if (p.flags & branchPageFlag) != 0 {
|
||||||
|
return "branch"
|
||||||
|
} else if (p.flags & leafPageFlag) != 0 {
|
||||||
|
return "leaf"
|
||||||
|
} else if (p.flags & metaPageFlag) != 0 {
|
||||||
|
return "meta"
|
||||||
|
} else if (p.flags & freelistPageFlag) != 0 {
|
||||||
|
return "freelist"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("unknown<%02x>", p.flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// meta returns a pointer to the metadata section of the page.
|
||||||
|
func (p *page) meta() *meta {
|
||||||
|
return (*meta)(unsafe.Pointer(&p.ptr))
|
||||||
|
}
|
||||||
|
|
||||||
|
// leafPageElement retrieves the leaf node by index
|
||||||
|
func (p *page) leafPageElement(index uint16) *leafPageElement {
|
||||||
|
n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// leafPageElements retrieves a list of leaf nodes.
|
||||||
|
func (p *page) leafPageElements() []leafPageElement {
|
||||||
|
if p.count == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// branchPageElement retrieves the branch node by index
|
||||||
|
func (p *page) branchPageElement(index uint16) *branchPageElement {
|
||||||
|
return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
|
||||||
|
}
|
||||||
|
|
||||||
|
// branchPageElements retrieves a list of branch nodes.
|
||||||
|
func (p *page) branchPageElements() []branchPageElement {
|
||||||
|
if p.count == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// dump writes n bytes of the page to STDERR as hex output.
|
||||||
|
func (p *page) hexdump(n int) {
|
||||||
|
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
|
||||||
|
fmt.Fprintf(os.Stderr, "%x\n", buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
type pages []*page
|
||||||
|
|
||||||
|
func (s pages) Len() int { return len(s) }
|
||||||
|
func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
|
||||||
|
|
||||||
|
// branchPageElement represents a node on a branch page.
|
||||||
|
type branchPageElement struct {
|
||||||
|
pos uint32
|
||||||
|
ksize uint32
|
||||||
|
pgid pgid
|
||||||
|
}
|
||||||
|
|
||||||
|
// key returns a byte slice of the node key.
|
||||||
|
func (n *branchPageElement) key() []byte {
|
||||||
|
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
|
||||||
|
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
|
||||||
|
}
|
||||||
|
|
||||||
|
// leafPageElement represents a node on a leaf page.
|
||||||
|
type leafPageElement struct {
|
||||||
|
flags uint32
|
||||||
|
pos uint32
|
||||||
|
ksize uint32
|
||||||
|
vsize uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// key returns a byte slice of the node key.
|
||||||
|
func (n *leafPageElement) key() []byte {
|
||||||
|
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
|
||||||
|
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
|
||||||
|
}
|
||||||
|
|
||||||
|
// value returns a byte slice of the node value.
|
||||||
|
func (n *leafPageElement) value() []byte {
|
||||||
|
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
|
||||||
|
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
|
||||||
|
}
|
||||||
|
|
||||||
|
// PageInfo represents human readable information about a page.
|
||||||
|
type PageInfo struct {
|
||||||
|
ID int
|
||||||
|
Type string
|
||||||
|
Count int
|
||||||
|
OverflowCount int
|
||||||
|
}
|
||||||
|
|
||||||
|
type pgids []pgid
|
||||||
|
|
||||||
|
func (s pgids) Len() int { return len(s) }
|
||||||
|
func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
|
||||||
|
|
||||||
|
// merge returns the sorted union of a and b.
|
||||||
|
func (a pgids) merge(b pgids) pgids {
|
||||||
|
// Return the opposite slice if one is nil.
|
||||||
|
if len(a) == 0 {
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
if len(b) == 0 {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
merged := make(pgids, len(a)+len(b))
|
||||||
|
mergepgids(merged, a, b)
|
||||||
|
return merged
|
||||||
|
}
|
||||||
|
|
||||||
|
// mergepgids copies the sorted union of a and b into dst.
|
||||||
|
// If dst is too small, it panics.
|
||||||
|
func mergepgids(dst, a, b pgids) {
|
||||||
|
if len(dst) < len(a)+len(b) {
|
||||||
|
panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
|
||||||
|
}
|
||||||
|
// Copy in the opposite slice if one is nil.
|
||||||
|
if len(a) == 0 {
|
||||||
|
copy(dst, b)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(b) == 0 {
|
||||||
|
copy(dst, a)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merged will hold all elements from both lists.
|
||||||
|
merged := dst[:0]
|
||||||
|
|
||||||
|
// Assign lead to the slice with a lower starting value, follow to the higher value.
|
||||||
|
lead, follow := a, b
|
||||||
|
if b[0] < a[0] {
|
||||||
|
lead, follow = b, a
|
||||||
|
}
|
||||||
|
|
||||||
|
// Continue while there are elements in the lead.
|
||||||
|
for len(lead) > 0 {
|
||||||
|
// Merge largest prefix of lead that is ahead of follow[0].
|
||||||
|
n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
|
||||||
|
merged = append(merged, lead[:n]...)
|
||||||
|
if n >= len(lead) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Swap lead and follow.
|
||||||
|
lead, follow = follow, lead[n:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append what's left in follow.
|
||||||
|
_ = append(merged, follow...)
|
||||||
|
}
|
705
vendor/github.com/coreos/bbolt/tx.go
generated
vendored
Normal file
705
vendor/github.com/coreos/bbolt/tx.go
generated
vendored
Normal file
|
@ -0,0 +1,705 @@
|
||||||
|
package bolt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// txid represents the internal transaction identifier.
|
||||||
|
type txid uint64
|
||||||
|
|
||||||
|
// Tx represents a read-only or read/write transaction on the database.
|
||||||
|
// Read-only transactions can be used for retrieving values for keys and creating cursors.
|
||||||
|
// Read/write transactions can create and remove buckets and create and remove keys.
|
||||||
|
//
|
||||||
|
// IMPORTANT: You must commit or rollback transactions when you are done with
|
||||||
|
// them. Pages can not be reclaimed by the writer until no more transactions
|
||||||
|
// are using them. A long running read transaction can cause the database to
|
||||||
|
// quickly grow.
|
||||||
|
type Tx struct {
|
||||||
|
writable bool
|
||||||
|
managed bool
|
||||||
|
db *DB
|
||||||
|
meta *meta
|
||||||
|
root Bucket
|
||||||
|
pages map[pgid]*page
|
||||||
|
stats TxStats
|
||||||
|
commitHandlers []func()
|
||||||
|
|
||||||
|
// WriteFlag specifies the flag for write-related methods like WriteTo().
|
||||||
|
// Tx opens the database file with the specified flag to copy the data.
|
||||||
|
//
|
||||||
|
// By default, the flag is unset, which works well for mostly in-memory
|
||||||
|
// workloads. For databases that are much larger than available RAM,
|
||||||
|
// set the flag to syscall.O_DIRECT to avoid trashing the page cache.
|
||||||
|
WriteFlag int
|
||||||
|
}
|
||||||
|
|
||||||
|
// init initializes the transaction.
|
||||||
|
func (tx *Tx) init(db *DB) {
|
||||||
|
tx.db = db
|
||||||
|
tx.pages = nil
|
||||||
|
|
||||||
|
// Copy the meta page since it can be changed by the writer.
|
||||||
|
tx.meta = &meta{}
|
||||||
|
db.meta().copy(tx.meta)
|
||||||
|
|
||||||
|
// Copy over the root bucket.
|
||||||
|
tx.root = newBucket(tx)
|
||||||
|
tx.root.bucket = &bucket{}
|
||||||
|
*tx.root.bucket = tx.meta.root
|
||||||
|
|
||||||
|
// Increment the transaction id and add a page cache for writable transactions.
|
||||||
|
if tx.writable {
|
||||||
|
tx.pages = make(map[pgid]*page)
|
||||||
|
tx.meta.txid += txid(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID returns the transaction id.
|
||||||
|
func (tx *Tx) ID() int {
|
||||||
|
return int(tx.meta.txid)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DB returns a reference to the database that created the transaction.
|
||||||
|
func (tx *Tx) DB() *DB {
|
||||||
|
return tx.db
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns current database size in bytes as seen by this transaction.
|
||||||
|
func (tx *Tx) Size() int64 {
|
||||||
|
return int64(tx.meta.pgid) * int64(tx.db.pageSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writable returns whether the transaction can perform write operations.
|
||||||
|
func (tx *Tx) Writable() bool {
|
||||||
|
return tx.writable
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cursor creates a cursor associated with the root bucket.
|
||||||
|
// All items in the cursor will return a nil value because all root bucket keys point to buckets.
|
||||||
|
// The cursor is only valid as long as the transaction is open.
|
||||||
|
// Do not use a cursor after the transaction is closed.
|
||||||
|
func (tx *Tx) Cursor() *Cursor {
|
||||||
|
return tx.root.Cursor()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stats retrieves a copy of the current transaction statistics.
|
||||||
|
func (tx *Tx) Stats() TxStats {
|
||||||
|
return tx.stats
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bucket retrieves a bucket by name.
|
||||||
|
// Returns nil if the bucket does not exist.
|
||||||
|
// The bucket instance is only valid for the lifetime of the transaction.
|
||||||
|
func (tx *Tx) Bucket(name []byte) *Bucket {
|
||||||
|
return tx.root.Bucket(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateBucket creates a new bucket.
|
||||||
|
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
|
||||||
|
// The bucket instance is only valid for the lifetime of the transaction.
|
||||||
|
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
|
||||||
|
return tx.root.CreateBucket(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
|
||||||
|
// Returns an error if the bucket name is blank, or if the bucket name is too long.
|
||||||
|
// The bucket instance is only valid for the lifetime of the transaction.
|
||||||
|
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
|
||||||
|
return tx.root.CreateBucketIfNotExists(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteBucket deletes a bucket.
|
||||||
|
// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
|
||||||
|
func (tx *Tx) DeleteBucket(name []byte) error {
|
||||||
|
return tx.root.DeleteBucket(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForEach executes a function for each bucket in the root.
|
||||||
|
// If the provided function returns an error then the iteration is stopped and
|
||||||
|
// the error is returned to the caller.
|
||||||
|
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
|
||||||
|
return tx.root.ForEach(func(k, v []byte) error {
|
||||||
|
return fn(k, tx.root.Bucket(k))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnCommit adds a handler function to be executed after the transaction successfully commits.
|
||||||
|
func (tx *Tx) OnCommit(fn func()) {
|
||||||
|
tx.commitHandlers = append(tx.commitHandlers, fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commit writes all changes to disk and updates the meta page.
|
||||||
|
// Returns an error if a disk write error occurs, or if Commit is
|
||||||
|
// called on a read-only transaction.
|
||||||
|
func (tx *Tx) Commit() error {
|
||||||
|
_assert(!tx.managed, "managed tx commit not allowed")
|
||||||
|
if tx.db == nil {
|
||||||
|
return ErrTxClosed
|
||||||
|
} else if !tx.writable {
|
||||||
|
return ErrTxNotWritable
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
|
||||||
|
|
||||||
|
// Rebalance nodes which have had deletions.
|
||||||
|
var startTime = time.Now()
|
||||||
|
tx.root.rebalance()
|
||||||
|
if tx.stats.Rebalance > 0 {
|
||||||
|
tx.stats.RebalanceTime += time.Since(startTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
// spill data onto dirty pages.
|
||||||
|
startTime = time.Now()
|
||||||
|
if err := tx.root.spill(); err != nil {
|
||||||
|
tx.rollback()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tx.stats.SpillTime += time.Since(startTime)
|
||||||
|
|
||||||
|
// Free the old root bucket.
|
||||||
|
tx.meta.root.root = tx.root.root
|
||||||
|
|
||||||
|
// Free the old freelist because commit writes out a fresh freelist.
|
||||||
|
if tx.meta.freelist != pgidNoFreelist {
|
||||||
|
tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
|
||||||
|
}
|
||||||
|
|
||||||
|
if !tx.db.NoFreelistSync {
|
||||||
|
err := tx.commitFreelist()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
tx.meta.freelist = pgidNoFreelist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write dirty pages to disk.
|
||||||
|
startTime = time.Now()
|
||||||
|
if err := tx.write(); err != nil {
|
||||||
|
tx.rollback()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If strict mode is enabled then perform a consistency check.
|
||||||
|
// Only the first consistency error is reported in the panic.
|
||||||
|
if tx.db.StrictMode {
|
||||||
|
ch := tx.Check()
|
||||||
|
var errs []string
|
||||||
|
for {
|
||||||
|
err, ok := <-ch
|
||||||
|
if !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
errs = append(errs, err.Error())
|
||||||
|
}
|
||||||
|
if len(errs) > 0 {
|
||||||
|
panic("check fail: " + strings.Join(errs, "\n"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write meta to disk.
|
||||||
|
if err := tx.writeMeta(); err != nil {
|
||||||
|
tx.rollback()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tx.stats.WriteTime += time.Since(startTime)
|
||||||
|
|
||||||
|
// Finalize the transaction.
|
||||||
|
tx.close()
|
||||||
|
|
||||||
|
// Execute commit handlers now that the locks have been removed.
|
||||||
|
for _, fn := range tx.commitHandlers {
|
||||||
|
fn()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tx *Tx) commitFreelist() error {
|
||||||
|
// Allocate new pages for the new free list. This will overestimate
|
||||||
|
// the size of the freelist but not underestimate the size (which would be bad).
|
||||||
|
opgid := tx.meta.pgid
|
||||||
|
p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
|
||||||
|
if err != nil {
|
||||||
|
tx.rollback()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := tx.db.freelist.write(p); err != nil {
|
||||||
|
tx.rollback()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tx.meta.freelist = p.id
|
||||||
|
// If the high water mark has moved up then attempt to grow the database.
|
||||||
|
if tx.meta.pgid > opgid {
|
||||||
|
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
|
||||||
|
tx.rollback()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rollback closes the transaction and ignores all previous updates. Read-only
|
||||||
|
// transactions must be rolled back and not committed.
|
||||||
|
func (tx *Tx) Rollback() error {
|
||||||
|
_assert(!tx.managed, "managed tx rollback not allowed")
|
||||||
|
if tx.db == nil {
|
||||||
|
return ErrTxClosed
|
||||||
|
}
|
||||||
|
tx.rollback()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tx *Tx) rollback() {
|
||||||
|
if tx.db == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if tx.writable {
|
||||||
|
tx.db.freelist.rollback(tx.meta.txid)
|
||||||
|
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
|
||||||
|
}
|
||||||
|
tx.close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tx *Tx) close() {
|
||||||
|
if tx.db == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if tx.writable {
|
||||||
|
// Grab freelist stats.
|
||||||
|
var freelistFreeN = tx.db.freelist.free_count()
|
||||||
|
var freelistPendingN = tx.db.freelist.pending_count()
|
||||||
|
var freelistAlloc = tx.db.freelist.size()
|
||||||
|
|
||||||
|
// Remove transaction ref & writer lock.
|
||||||
|
tx.db.rwtx = nil
|
||||||
|
tx.db.rwlock.Unlock()
|
||||||
|
|
||||||
|
// Merge statistics.
|
||||||
|
tx.db.statlock.Lock()
|
||||||
|
tx.db.stats.FreePageN = freelistFreeN
|
||||||
|
tx.db.stats.PendingPageN = freelistPendingN
|
||||||
|
tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
|
||||||
|
tx.db.stats.FreelistInuse = freelistAlloc
|
||||||
|
tx.db.stats.TxStats.add(&tx.stats)
|
||||||
|
tx.db.statlock.Unlock()
|
||||||
|
} else {
|
||||||
|
tx.db.removeTx(tx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear all references.
|
||||||
|
tx.db = nil
|
||||||
|
tx.meta = nil
|
||||||
|
tx.root = Bucket{tx: tx}
|
||||||
|
tx.pages = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy writes the entire database to a writer.
|
||||||
|
// This function exists for backwards compatibility. Use WriteTo() instead.
|
||||||
|
func (tx *Tx) Copy(w io.Writer) error {
|
||||||
|
_, err := tx.WriteTo(w)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteTo writes the entire database to a writer.
|
||||||
|
// If err == nil then exactly tx.Size() bytes will be written into the writer.
|
||||||
|
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
|
||||||
|
// Attempt to open reader with WriteFlag
|
||||||
|
f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if cerr := f.Close(); err == nil {
|
||||||
|
err = cerr
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Generate a meta page. We use the same page data for both meta pages.
|
||||||
|
buf := make([]byte, tx.db.pageSize)
|
||||||
|
page := (*page)(unsafe.Pointer(&buf[0]))
|
||||||
|
page.flags = metaPageFlag
|
||||||
|
*page.meta() = *tx.meta
|
||||||
|
|
||||||
|
// Write meta 0.
|
||||||
|
page.id = 0
|
||||||
|
page.meta().checksum = page.meta().sum64()
|
||||||
|
nn, err := w.Write(buf)
|
||||||
|
n += int64(nn)
|
||||||
|
if err != nil {
|
||||||
|
return n, fmt.Errorf("meta 0 copy: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write meta 1 with a lower transaction id.
|
||||||
|
page.id = 1
|
||||||
|
page.meta().txid -= 1
|
||||||
|
page.meta().checksum = page.meta().sum64()
|
||||||
|
nn, err = w.Write(buf)
|
||||||
|
n += int64(nn)
|
||||||
|
if err != nil {
|
||||||
|
return n, fmt.Errorf("meta 1 copy: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move past the meta pages in the file.
|
||||||
|
if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
|
||||||
|
return n, fmt.Errorf("seek: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy data pages.
|
||||||
|
wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
|
||||||
|
n += wn
|
||||||
|
if err != nil {
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyFile copies the entire database to file at the given path.
|
||||||
|
// A reader transaction is maintained during the copy so it is safe to continue
|
||||||
|
// using the database while a copy is in progress.
|
||||||
|
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
|
||||||
|
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tx.Copy(f)
|
||||||
|
if err != nil {
|
||||||
|
_ = f.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return f.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check performs several consistency checks on the database for this transaction.
|
||||||
|
// An error is returned if any inconsistency is found.
|
||||||
|
//
|
||||||
|
// It can be safely run concurrently on a writable transaction. However, this
|
||||||
|
// incurs a high cost for large databases and databases with a lot of subbuckets
|
||||||
|
// because of caching. This overhead can be removed if running on a read-only
|
||||||
|
// transaction, however, it is not safe to execute other writer transactions at
|
||||||
|
// the same time.
|
||||||
|
func (tx *Tx) Check() <-chan error {
|
||||||
|
ch := make(chan error)
|
||||||
|
go tx.check(ch)
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tx *Tx) check(ch chan error) {
|
||||||
|
// Force loading free list if opened in ReadOnly mode.
|
||||||
|
tx.db.loadFreelist()
|
||||||
|
|
||||||
|
// Check if any pages are double freed.
|
||||||
|
freed := make(map[pgid]bool)
|
||||||
|
all := make([]pgid, tx.db.freelist.count())
|
||||||
|
tx.db.freelist.copyall(all)
|
||||||
|
for _, id := range all {
|
||||||
|
if freed[id] {
|
||||||
|
ch <- fmt.Errorf("page %d: already freed", id)
|
||||||
|
}
|
||||||
|
freed[id] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Track every reachable page.
|
||||||
|
reachable := make(map[pgid]*page)
|
||||||
|
reachable[0] = tx.page(0) // meta0
|
||||||
|
reachable[1] = tx.page(1) // meta1
|
||||||
|
if tx.meta.freelist != pgidNoFreelist {
|
||||||
|
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
|
||||||
|
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursively check buckets.
|
||||||
|
tx.checkBucket(&tx.root, reachable, freed, ch)
|
||||||
|
|
||||||
|
// Ensure all pages below high water mark are either reachable or freed.
|
||||||
|
for i := pgid(0); i < tx.meta.pgid; i++ {
|
||||||
|
_, isReachable := reachable[i]
|
||||||
|
if !isReachable && !freed[i] {
|
||||||
|
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close the channel to signal completion.
|
||||||
|
close(ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
|
||||||
|
// Ignore inline buckets.
|
||||||
|
if b.root == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check every page used by this bucket.
|
||||||
|
b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
|
||||||
|
if p.id > tx.meta.pgid {
|
||||||
|
ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure each page is only referenced once.
|
||||||
|
for i := pgid(0); i <= pgid(p.overflow); i++ {
|
||||||
|
var id = p.id + i
|
||||||
|
if _, ok := reachable[id]; ok {
|
||||||
|
ch <- fmt.Errorf("page %d: multiple references", int(id))
|
||||||
|
}
|
||||||
|
reachable[id] = p
|
||||||
|
}
|
||||||
|
|
||||||
|
// We should only encounter un-freed leaf and branch pages.
|
||||||
|
if freed[p.id] {
|
||||||
|
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
|
||||||
|
} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
|
||||||
|
ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Check each bucket within this bucket.
|
||||||
|
_ = b.ForEach(func(k, v []byte) error {
|
||||||
|
if child := b.Bucket(k); child != nil {
|
||||||
|
tx.checkBucket(child, reachable, freed, ch)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// allocate returns a contiguous block of memory starting at a given page.
|
||||||
|
func (tx *Tx) allocate(count int) (*page, error) {
|
||||||
|
p, err := tx.db.allocate(tx.meta.txid, count)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save to our page cache.
|
||||||
|
tx.pages[p.id] = p
|
||||||
|
|
||||||
|
// Update statistics.
|
||||||
|
tx.stats.PageCount++
|
||||||
|
tx.stats.PageAlloc += count * tx.db.pageSize
|
||||||
|
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// write writes any dirty pages to disk.
|
||||||
|
func (tx *Tx) write() error {
|
||||||
|
// Sort pages by id.
|
||||||
|
pages := make(pages, 0, len(tx.pages))
|
||||||
|
for _, p := range tx.pages {
|
||||||
|
pages = append(pages, p)
|
||||||
|
}
|
||||||
|
// Clear out page cache early.
|
||||||
|
tx.pages = make(map[pgid]*page)
|
||||||
|
sort.Sort(pages)
|
||||||
|
|
||||||
|
// Write pages to disk in order.
|
||||||
|
for _, p := range pages {
|
||||||
|
size := (int(p.overflow) + 1) * tx.db.pageSize
|
||||||
|
offset := int64(p.id) * int64(tx.db.pageSize)
|
||||||
|
|
||||||
|
// Write out page in "max allocation" sized chunks.
|
||||||
|
ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
|
||||||
|
for {
|
||||||
|
// Limit our write to our max allocation size.
|
||||||
|
sz := size
|
||||||
|
if sz > maxAllocSize-1 {
|
||||||
|
sz = maxAllocSize - 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write chunk to disk.
|
||||||
|
buf := ptr[:sz]
|
||||||
|
if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update statistics.
|
||||||
|
tx.stats.Write++
|
||||||
|
|
||||||
|
// Exit inner for loop if we've written all the chunks.
|
||||||
|
size -= sz
|
||||||
|
if size == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise move offset forward and move pointer to next chunk.
|
||||||
|
offset += int64(sz)
|
||||||
|
ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore file sync if flag is set on DB.
|
||||||
|
if !tx.db.NoSync || IgnoreNoSync {
|
||||||
|
if err := fdatasync(tx.db); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put small pages back to page pool.
|
||||||
|
for _, p := range pages {
|
||||||
|
// Ignore page sizes over 1 page.
|
||||||
|
// These are allocated using make() instead of the page pool.
|
||||||
|
if int(p.overflow) != 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
|
||||||
|
|
||||||
|
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
|
||||||
|
for i := range buf {
|
||||||
|
buf[i] = 0
|
||||||
|
}
|
||||||
|
tx.db.pagePool.Put(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeMeta writes the meta to the disk.
|
||||||
|
func (tx *Tx) writeMeta() error {
|
||||||
|
// Create a temporary buffer for the meta page.
|
||||||
|
buf := make([]byte, tx.db.pageSize)
|
||||||
|
p := tx.db.pageInBuffer(buf, 0)
|
||||||
|
tx.meta.write(p)
|
||||||
|
|
||||||
|
// Write the meta page to file.
|
||||||
|
if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !tx.db.NoSync || IgnoreNoSync {
|
||||||
|
if err := fdatasync(tx.db); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update statistics.
|
||||||
|
tx.stats.Write++
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// page returns a reference to the page with a given id.
|
||||||
|
// If page has been written to then a temporary buffered page is returned.
|
||||||
|
func (tx *Tx) page(id pgid) *page {
|
||||||
|
// Check the dirty pages first.
|
||||||
|
if tx.pages != nil {
|
||||||
|
if p, ok := tx.pages[id]; ok {
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise return directly from the mmap.
|
||||||
|
return tx.db.page(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// forEachPage iterates over every page within a given page and executes a function.
|
||||||
|
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
|
||||||
|
p := tx.page(pgid)
|
||||||
|
|
||||||
|
// Execute function.
|
||||||
|
fn(p, depth)
|
||||||
|
|
||||||
|
// Recursively loop over children.
|
||||||
|
if (p.flags & branchPageFlag) != 0 {
|
||||||
|
for i := 0; i < int(p.count); i++ {
|
||||||
|
elem := p.branchPageElement(uint16(i))
|
||||||
|
tx.forEachPage(elem.pgid, depth+1, fn)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Page returns page information for a given page number.
|
||||||
|
// This is only safe for concurrent use when used by a writable transaction.
|
||||||
|
func (tx *Tx) Page(id int) (*PageInfo, error) {
|
||||||
|
if tx.db == nil {
|
||||||
|
return nil, ErrTxClosed
|
||||||
|
} else if pgid(id) >= tx.meta.pgid {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the page info.
|
||||||
|
p := tx.db.page(pgid(id))
|
||||||
|
info := &PageInfo{
|
||||||
|
ID: id,
|
||||||
|
Count: int(p.count),
|
||||||
|
OverflowCount: int(p.overflow),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine the type (or if it's free).
|
||||||
|
if tx.db.freelist.freed(pgid(id)) {
|
||||||
|
info.Type = "free"
|
||||||
|
} else {
|
||||||
|
info.Type = p.typ()
|
||||||
|
}
|
||||||
|
|
||||||
|
return info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TxStats represents statistics about the actions performed by the transaction.
|
||||||
|
type TxStats struct {
|
||||||
|
// Page statistics.
|
||||||
|
PageCount int // number of page allocations
|
||||||
|
PageAlloc int // total bytes allocated
|
||||||
|
|
||||||
|
// Cursor statistics.
|
||||||
|
CursorCount int // number of cursors created
|
||||||
|
|
||||||
|
// Node statistics
|
||||||
|
NodeCount int // number of node allocations
|
||||||
|
NodeDeref int // number of node dereferences
|
||||||
|
|
||||||
|
// Rebalance statistics.
|
||||||
|
Rebalance int // number of node rebalances
|
||||||
|
RebalanceTime time.Duration // total time spent rebalancing
|
||||||
|
|
||||||
|
// Split/Spill statistics.
|
||||||
|
Split int // number of nodes split
|
||||||
|
Spill int // number of nodes spilled
|
||||||
|
SpillTime time.Duration // total time spent spilling
|
||||||
|
|
||||||
|
// Write statistics.
|
||||||
|
Write int // number of writes performed
|
||||||
|
WriteTime time.Duration // total time spent writing to disk
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *TxStats) add(other *TxStats) {
|
||||||
|
s.PageCount += other.PageCount
|
||||||
|
s.PageAlloc += other.PageAlloc
|
||||||
|
s.CursorCount += other.CursorCount
|
||||||
|
s.NodeCount += other.NodeCount
|
||||||
|
s.NodeDeref += other.NodeDeref
|
||||||
|
s.Rebalance += other.Rebalance
|
||||||
|
s.RebalanceTime += other.RebalanceTime
|
||||||
|
s.Split += other.Split
|
||||||
|
s.Spill += other.Spill
|
||||||
|
s.SpillTime += other.SpillTime
|
||||||
|
s.Write += other.Write
|
||||||
|
s.WriteTime += other.WriteTime
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sub calculates and returns the difference between two sets of transaction stats.
|
||||||
|
// This is useful when obtaining stats at two different points and time and
|
||||||
|
// you need the performance counters that occurred within that time span.
|
||||||
|
func (s *TxStats) Sub(other *TxStats) TxStats {
|
||||||
|
var diff TxStats
|
||||||
|
diff.PageCount = s.PageCount - other.PageCount
|
||||||
|
diff.PageAlloc = s.PageAlloc - other.PageAlloc
|
||||||
|
diff.CursorCount = s.CursorCount - other.CursorCount
|
||||||
|
diff.NodeCount = s.NodeCount - other.NodeCount
|
||||||
|
diff.NodeDeref = s.NodeDeref - other.NodeDeref
|
||||||
|
diff.Rebalance = s.Rebalance - other.Rebalance
|
||||||
|
diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
|
||||||
|
diff.Split = s.Split - other.Split
|
||||||
|
diff.Spill = s.Spill - other.Spill
|
||||||
|
diff.SpillTime = s.SpillTime - other.SpillTime
|
||||||
|
diff.Write = s.Write - other.Write
|
||||||
|
diff.WriteTime = s.WriteTime - other.WriteTime
|
||||||
|
return diff
|
||||||
|
}
|
824
vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
generated
vendored
Normal file
824
vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,824 @@
|
||||||
|
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: auth.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package authpb is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
auth.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
User
|
||||||
|
Permission
|
||||||
|
Role
|
||||||
|
*/
|
||||||
|
package authpb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
|
math "math"
|
||||||
|
|
||||||
|
io "io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
type Permission_Type int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
READ Permission_Type = 0
|
||||||
|
WRITE Permission_Type = 1
|
||||||
|
READWRITE Permission_Type = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
var Permission_Type_name = map[int32]string{
|
||||||
|
0: "READ",
|
||||||
|
1: "WRITE",
|
||||||
|
2: "READWRITE",
|
||||||
|
}
|
||||||
|
var Permission_Type_value = map[string]int32{
|
||||||
|
"READ": 0,
|
||||||
|
"WRITE": 1,
|
||||||
|
"READWRITE": 2,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x Permission_Type) String() string {
|
||||||
|
return proto.EnumName(Permission_Type_name, int32(x))
|
||||||
|
}
|
||||||
|
func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} }
|
||||||
|
|
||||||
|
// User is a single entry in the bucket authUsers
|
||||||
|
type User struct {
|
||||||
|
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
|
||||||
|
Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *User) Reset() { *m = User{} }
|
||||||
|
func (m *User) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*User) ProtoMessage() {}
|
||||||
|
func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} }
|
||||||
|
|
||||||
|
// Permission is a single entity
|
||||||
|
type Permission struct {
|
||||||
|
PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
|
||||||
|
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||||
|
RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Permission) Reset() { *m = Permission{} }
|
||||||
|
func (m *Permission) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Permission) ProtoMessage() {}
|
||||||
|
func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} }
|
||||||
|
|
||||||
|
// Role is a single entry in the bucket authRoles
|
||||||
|
type Role struct {
|
||||||
|
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Role) Reset() { *m = Role{} }
|
||||||
|
func (m *Role) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Role) ProtoMessage() {}
|
||||||
|
func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*User)(nil), "authpb.User")
|
||||||
|
proto.RegisterType((*Permission)(nil), "authpb.Permission")
|
||||||
|
proto.RegisterType((*Role)(nil), "authpb.Role")
|
||||||
|
proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
|
||||||
|
}
|
||||||
|
func (m *User) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *User) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
if len(m.Password) > 0 {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
|
||||||
|
i += copy(dAtA[i:], m.Password)
|
||||||
|
}
|
||||||
|
if len(m.Roles) > 0 {
|
||||||
|
for _, s := range m.Roles {
|
||||||
|
dAtA[i] = 0x1a
|
||||||
|
i++
|
||||||
|
l = len(s)
|
||||||
|
for l >= 1<<7 {
|
||||||
|
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||||
|
l >>= 7
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
dAtA[i] = uint8(l)
|
||||||
|
i++
|
||||||
|
i += copy(dAtA[i:], s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Permission) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.PermType != 0 {
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
i++
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
|
||||||
|
}
|
||||||
|
if len(m.Key) > 0 {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i += copy(dAtA[i:], m.Key)
|
||||||
|
}
|
||||||
|
if len(m.RangeEnd) > 0 {
|
||||||
|
dAtA[i] = 0x1a
|
||||||
|
i++
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
|
||||||
|
i += copy(dAtA[i:], m.RangeEnd)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Role) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Role) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
if len(m.KeyPermission) > 0 {
|
||||||
|
for _, msg := range m.KeyPermission {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(msg.Size()))
|
||||||
|
n, err := msg.MarshalTo(dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Auth(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Auth(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *User) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
|
}
|
||||||
|
l = len(m.Password)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
|
}
|
||||||
|
if len(m.Roles) > 0 {
|
||||||
|
for _, s := range m.Roles {
|
||||||
|
l = len(s)
|
||||||
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Permission) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.PermType != 0 {
|
||||||
|
n += 1 + sovAuth(uint64(m.PermType))
|
||||||
|
}
|
||||||
|
l = len(m.Key)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
|
}
|
||||||
|
l = len(m.RangeEnd)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Role) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
|
}
|
||||||
|
if len(m.KeyPermission) > 0 {
|
||||||
|
for _, e := range m.KeyPermission {
|
||||||
|
l = e.Size()
|
||||||
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovAuth(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozAuth(x uint64) (n int) {
|
||||||
|
return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (m *User) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: User: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var byteLen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
byteLen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if byteLen < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
|
||||||
|
if m.Name == nil {
|
||||||
|
m.Name = []byte{}
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
|
||||||
|
}
|
||||||
|
var byteLen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
byteLen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if byteLen < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
|
||||||
|
if m.Password == nil {
|
||||||
|
m.Password = []byte{}
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 3:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipAuth(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *Permission) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Permission: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
|
||||||
|
}
|
||||||
|
m.PermType = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.PermType |= (Permission_Type(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||||
|
}
|
||||||
|
var byteLen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
byteLen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if byteLen < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
|
||||||
|
if m.Key == nil {
|
||||||
|
m.Key = []byte{}
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 3:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
|
||||||
|
}
|
||||||
|
var byteLen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
byteLen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if byteLen < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
|
||||||
|
if m.RangeEnd == nil {
|
||||||
|
m.RangeEnd = []byte{}
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipAuth(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *Role) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Role: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var byteLen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
byteLen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if byteLen < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
|
||||||
|
if m.Name == nil {
|
||||||
|
m.Name = []byte{}
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.KeyPermission = append(m.KeyPermission, &Permission{})
|
||||||
|
if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipAuth(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipAuth(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipAuth(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
|
||||||
|
|
||||||
|
var fileDescriptorAuth = []byte{
|
||||||
|
// 288 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
|
||||||
|
0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
|
||||||
|
0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
|
||||||
|
0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
|
||||||
|
0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
|
||||||
|
0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
|
||||||
|
0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
|
||||||
|
0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
|
||||||
|
0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
|
||||||
|
0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
|
||||||
|
0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
|
||||||
|
0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
|
||||||
|
0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
|
||||||
|
0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
|
||||||
|
0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
|
||||||
|
0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
|
||||||
|
0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
|
||||||
|
0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
|
||||||
|
}
|
36
vendor/github.com/coreos/etcd/client/auth_role.go
generated
vendored
36
vendor/github.com/coreos/etcd/client/auth_role.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
@ -56,22 +56,22 @@ func NewAuthRoleAPI(c Client) AuthRoleAPI {
|
||||||
}
|
}
|
||||||
|
|
||||||
type AuthRoleAPI interface {
|
type AuthRoleAPI interface {
|
||||||
// Add a role.
|
// AddRole adds a role.
|
||||||
AddRole(ctx context.Context, role string) error
|
AddRole(ctx context.Context, role string) error
|
||||||
|
|
||||||
// Remove a role.
|
// RemoveRole removes a role.
|
||||||
RemoveRole(ctx context.Context, role string) error
|
RemoveRole(ctx context.Context, role string) error
|
||||||
|
|
||||||
// Get role details.
|
// GetRole retrieves role details.
|
||||||
GetRole(ctx context.Context, role string) (*Role, error)
|
GetRole(ctx context.Context, role string) (*Role, error)
|
||||||
|
|
||||||
// Grant a role some permission prefixes for the KV store.
|
// GrantRoleKV grants a role some permission prefixes for the KV store.
|
||||||
GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
|
GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
|
||||||
|
|
||||||
// Revoke some some permission prefixes for a role on the KV store.
|
// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
|
||||||
RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
|
RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
|
||||||
|
|
||||||
// List roles.
|
// ListRoles lists roles.
|
||||||
ListRoles(ctx context.Context) ([]string, error)
|
ListRoles(ctx context.Context) ([]string, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -115,17 +115,20 @@ func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var userList struct {
|
var roleList struct {
|
||||||
Roles []string `json:"roles"`
|
Roles []Role `json:"roles"`
|
||||||
}
|
}
|
||||||
err = json.Unmarshal(body, &userList)
|
if err = json.Unmarshal(body, &roleList); err != nil {
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return userList.Roles, nil
|
ret := make([]string, 0, len(roleList.Roles))
|
||||||
|
for _, r := range roleList.Roles {
|
||||||
|
ret = append(ret, r.Role)
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
|
func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
|
||||||
|
@ -218,17 +221,16 @@ func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||||
var sec authError
|
var sec authError
|
||||||
err := json.Unmarshal(body, &sec)
|
err = json.Unmarshal(body, &sec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return nil, sec
|
return nil, sec
|
||||||
}
|
}
|
||||||
var role Role
|
var role Role
|
||||||
err = json.Unmarshal(body, &role)
|
if err = json.Unmarshal(body, &role); err != nil {
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return &role, nil
|
return &role, nil
|
||||||
|
|
69
vendor/github.com/coreos/etcd/client/auth_user.go
generated
vendored
69
vendor/github.com/coreos/etcd/client/auth_user.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
@ -36,6 +36,17 @@ type User struct {
|
||||||
Revoke []string `json:"revoke,omitempty"`
|
Revoke []string `json:"revoke,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// userListEntry is the user representation given by the server for ListUsers
|
||||||
|
type userListEntry struct {
|
||||||
|
User string `json:"user"`
|
||||||
|
Roles []Role `json:"roles"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type UserRoles struct {
|
||||||
|
User string `json:"user"`
|
||||||
|
Roles []Role `json:"roles"`
|
||||||
|
}
|
||||||
|
|
||||||
func v2AuthURL(ep url.URL, action string, name string) *url.URL {
|
func v2AuthURL(ep url.URL, action string, name string) *url.URL {
|
||||||
if name != "" {
|
if name != "" {
|
||||||
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
|
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
|
||||||
|
@ -78,9 +89,9 @@ func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
||||||
var sec authError
|
var sec authError
|
||||||
err := json.Unmarshal(body, &sec)
|
err = json.Unmarshal(body, &sec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -117,25 +128,25 @@ func NewAuthUserAPI(c Client) AuthUserAPI {
|
||||||
}
|
}
|
||||||
|
|
||||||
type AuthUserAPI interface {
|
type AuthUserAPI interface {
|
||||||
// Add a user.
|
// AddUser adds a user.
|
||||||
AddUser(ctx context.Context, username string, password string) error
|
AddUser(ctx context.Context, username string, password string) error
|
||||||
|
|
||||||
// Remove a user.
|
// RemoveUser removes a user.
|
||||||
RemoveUser(ctx context.Context, username string) error
|
RemoveUser(ctx context.Context, username string) error
|
||||||
|
|
||||||
// Get user details.
|
// GetUser retrieves user details.
|
||||||
GetUser(ctx context.Context, username string) (*User, error)
|
GetUser(ctx context.Context, username string) (*User, error)
|
||||||
|
|
||||||
// Grant a user some permission roles.
|
// GrantUser grants a user some permission roles.
|
||||||
GrantUser(ctx context.Context, username string, roles []string) (*User, error)
|
GrantUser(ctx context.Context, username string, roles []string) (*User, error)
|
||||||
|
|
||||||
// Revoke some permission roles from a user.
|
// RevokeUser revokes some permission roles from a user.
|
||||||
RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
|
RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
|
||||||
|
|
||||||
// Change the user's password.
|
// ChangePassword changes the user's password.
|
||||||
ChangePassword(ctx context.Context, username string, password string) (*User, error)
|
ChangePassword(ctx context.Context, username string, password string) (*User, error)
|
||||||
|
|
||||||
// List users.
|
// ListUsers lists the users.
|
||||||
ListUsers(ctx context.Context) ([]string, error)
|
ListUsers(ctx context.Context) ([]string, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -179,22 +190,28 @@ func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||||
var sec authError
|
var sec authError
|
||||||
err := json.Unmarshal(body, &sec)
|
err = json.Unmarshal(body, &sec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return nil, sec
|
return nil, sec
|
||||||
}
|
}
|
||||||
|
|
||||||
var userList struct {
|
var userList struct {
|
||||||
Users []string `json:"users"`
|
Users []userListEntry `json:"users"`
|
||||||
}
|
}
|
||||||
err = json.Unmarshal(body, &userList)
|
|
||||||
if err != nil {
|
if err = json.Unmarshal(body, &userList); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return userList.Users, nil
|
|
||||||
|
ret := make([]string, 0, len(userList.Users))
|
||||||
|
for _, u := range userList.Users {
|
||||||
|
ret = append(ret, u.User)
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
|
func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
|
||||||
|
@ -221,9 +238,9 @@ func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAct
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
||||||
var sec authError
|
var sec authError
|
||||||
err := json.Unmarshal(body, &sec)
|
err = json.Unmarshal(body, &sec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -280,18 +297,24 @@ func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||||
var sec authError
|
var sec authError
|
||||||
err := json.Unmarshal(body, &sec)
|
err = json.Unmarshal(body, &sec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return nil, sec
|
return nil, sec
|
||||||
}
|
}
|
||||||
var user User
|
var user User
|
||||||
err = json.Unmarshal(body, &user)
|
if err = json.Unmarshal(body, &user); err != nil {
|
||||||
if err != nil {
|
var userR UserRoles
|
||||||
return nil, err
|
if urerr := json.Unmarshal(body, &userR); urerr != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
user.User = userR.User
|
||||||
|
for _, r := range userR.Roles {
|
||||||
|
user.Roles = append(user.Roles, r.Role)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return &user, nil
|
return &user, nil
|
||||||
}
|
}
|
||||||
|
|
2
vendor/github.com/coreos/etcd/client/cancelreq.go
generated
vendored
2
vendor/github.com/coreos/etcd/client/cancelreq.go
generated
vendored
|
@ -4,8 +4,6 @@
|
||||||
|
|
||||||
// borrowed from golang/net/context/ctxhttp/cancelreq.go
|
// borrowed from golang/net/context/ctxhttp/cancelreq.go
|
||||||
|
|
||||||
// +build go1.5
|
|
||||||
|
|
||||||
package client
|
package client
|
||||||
|
|
||||||
import "net/http"
|
import "net/http"
|
||||||
|
|
17
vendor/github.com/coreos/etcd/client/cancelreq_go14.go
generated
vendored
17
vendor/github.com/coreos/etcd/client/cancelreq_go14.go
generated
vendored
|
@ -1,17 +0,0 @@
|
||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// borrowed from golang/net/context/ctxhttp/cancelreq_go14.go
|
|
||||||
|
|
||||||
// +build !go1.5
|
|
||||||
|
|
||||||
package client
|
|
||||||
|
|
||||||
import "net/http"
|
|
||||||
|
|
||||||
func requestCanceler(tr CancelableTransport, req *http.Request) func() {
|
|
||||||
return func() {
|
|
||||||
tr.CancelRequest(req)
|
|
||||||
}
|
|
||||||
}
|
|
252
vendor/github.com/coreos/etcd/client/client.go
generated
vendored
252
vendor/github.com/coreos/etcd/client/client.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
@ -15,6 +15,7 @@
|
||||||
package client
|
package client
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
@ -22,11 +23,13 @@ import (
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"reflect"
|
|
||||||
"sort"
|
"sort"
|
||||||
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/version"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -34,7 +37,12 @@ var (
|
||||||
ErrNoEndpoints = errors.New("client: no endpoints available")
|
ErrNoEndpoints = errors.New("client: no endpoints available")
|
||||||
ErrTooManyRedirects = errors.New("client: too many redirects")
|
ErrTooManyRedirects = errors.New("client: too many redirects")
|
||||||
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
|
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
|
||||||
|
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
|
||||||
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
|
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
|
||||||
|
|
||||||
|
// oneShotCtxValue is set on a context using WithValue(&oneShotValue) so
|
||||||
|
// that Do() will not retry a request
|
||||||
|
oneShotCtxValue interface{}
|
||||||
)
|
)
|
||||||
|
|
||||||
var DefaultRequestTimeout = 5 * time.Second
|
var DefaultRequestTimeout = 5 * time.Second
|
||||||
|
@ -48,6 +56,29 @@ var DefaultTransport CancelableTransport = &http.Transport{
|
||||||
TLSHandshakeTimeout: 10 * time.Second,
|
TLSHandshakeTimeout: 10 * time.Second,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type EndpointSelectionMode int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// EndpointSelectionRandom is the default value of the 'SelectionMode'.
|
||||||
|
// As the name implies, the client object will pick a node from the members
|
||||||
|
// of the cluster in a random fashion. If the cluster has three members, A, B,
|
||||||
|
// and C, the client picks any node from its three members as its request
|
||||||
|
// destination.
|
||||||
|
EndpointSelectionRandom EndpointSelectionMode = iota
|
||||||
|
|
||||||
|
// If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
|
||||||
|
// requests are sent directly to the cluster leader. This reduces
|
||||||
|
// forwarding roundtrips compared to making requests to etcd followers
|
||||||
|
// who then forward them to the cluster leader. In the event of a leader
|
||||||
|
// failure, however, clients configured this way cannot prioritize among
|
||||||
|
// the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
|
||||||
|
// to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
|
||||||
|
// maintain its knowledge of current cluster state.
|
||||||
|
//
|
||||||
|
// This mode should be used with Client.AutoSync().
|
||||||
|
EndpointSelectionPrioritizeLeader
|
||||||
|
)
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
// Endpoints defines a set of URLs (schemes, hosts and ports only)
|
// Endpoints defines a set of URLs (schemes, hosts and ports only)
|
||||||
// that can be used to communicate with a logical etcd cluster. For
|
// that can be used to communicate with a logical etcd cluster. For
|
||||||
|
@ -73,7 +104,7 @@ type Config struct {
|
||||||
// CheckRedirect specifies the policy for handling HTTP redirects.
|
// CheckRedirect specifies the policy for handling HTTP redirects.
|
||||||
// If CheckRedirect is not nil, the Client calls it before
|
// If CheckRedirect is not nil, the Client calls it before
|
||||||
// following an HTTP redirect. The sole argument is the number of
|
// following an HTTP redirect. The sole argument is the number of
|
||||||
// requests that have alrady been made. If CheckRedirect returns
|
// requests that have already been made. If CheckRedirect returns
|
||||||
// an error, Client.Do will not make any further requests and return
|
// an error, Client.Do will not make any further requests and return
|
||||||
// the error back it to the caller.
|
// the error back it to the caller.
|
||||||
//
|
//
|
||||||
|
@ -99,11 +130,17 @@ type Config struct {
|
||||||
// watch start. But if server is behind some kind of proxy, the response
|
// watch start. But if server is behind some kind of proxy, the response
|
||||||
// header may be cached at proxy, and Client cannot rely on this behavior.
|
// header may be cached at proxy, and Client cannot rely on this behavior.
|
||||||
//
|
//
|
||||||
|
// Especially, wait request will ignore this timeout.
|
||||||
|
//
|
||||||
// One API call may send multiple requests to different etcd servers until it
|
// One API call may send multiple requests to different etcd servers until it
|
||||||
// succeeds. Use context of the API to specify the overall timeout.
|
// succeeds. Use context of the API to specify the overall timeout.
|
||||||
//
|
//
|
||||||
// A HeaderTimeoutPerRequest of zero means no timeout.
|
// A HeaderTimeoutPerRequest of zero means no timeout.
|
||||||
HeaderTimeoutPerRequest time.Duration
|
HeaderTimeoutPerRequest time.Duration
|
||||||
|
|
||||||
|
// SelectionMode is an EndpointSelectionMode enum that specifies the
|
||||||
|
// policy for choosing the etcd cluster node to which requests are sent.
|
||||||
|
SelectionMode EndpointSelectionMode
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cfg *Config) transport() CancelableTransport {
|
func (cfg *Config) transport() CancelableTransport {
|
||||||
|
@ -162,6 +199,14 @@ type Client interface {
|
||||||
// this may differ from the initial Endpoints provided in the Config.
|
// this may differ from the initial Endpoints provided in the Config.
|
||||||
Endpoints() []string
|
Endpoints() []string
|
||||||
|
|
||||||
|
// SetEndpoints sets the set of API endpoints used by Client to resolve
|
||||||
|
// HTTP requests. If the given endpoints are not valid, an error will be
|
||||||
|
// returned
|
||||||
|
SetEndpoints(eps []string) error
|
||||||
|
|
||||||
|
// GetVersion retrieves the current etcd server and cluster version
|
||||||
|
GetVersion(ctx context.Context) (*version.Versions, error)
|
||||||
|
|
||||||
httpClient
|
httpClient
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -169,6 +214,7 @@ func New(cfg Config) (Client, error) {
|
||||||
c := &httpClusterClient{
|
c := &httpClusterClient{
|
||||||
clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
|
clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
|
||||||
rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
|
rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
|
||||||
|
selectionMode: cfg.SelectionMode,
|
||||||
}
|
}
|
||||||
if cfg.Username != "" {
|
if cfg.Username != "" {
|
||||||
c.credentials = &credentials{
|
c.credentials = &credentials{
|
||||||
|
@ -176,7 +222,7 @@ func New(cfg Config) (Client, error) {
|
||||||
password: cfg.Password,
|
password: cfg.Password,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := c.reset(cfg.Endpoints); err != nil {
|
if err := c.SetEndpoints(cfg.Endpoints); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return c, nil
|
return c, nil
|
||||||
|
@ -216,25 +262,69 @@ type httpClusterClient struct {
|
||||||
pinned int
|
pinned int
|
||||||
credentials *credentials
|
credentials *credentials
|
||||||
sync.RWMutex
|
sync.RWMutex
|
||||||
rand *rand.Rand
|
rand *rand.Rand
|
||||||
|
selectionMode EndpointSelectionMode
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *httpClusterClient) reset(eps []string) error {
|
func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
|
||||||
|
ceps := make([]url.URL, len(eps))
|
||||||
|
copy(ceps, eps)
|
||||||
|
|
||||||
|
// To perform a lookup on the new endpoint list without using the current
|
||||||
|
// client, we'll copy it
|
||||||
|
clientCopy := &httpClusterClient{
|
||||||
|
clientFactory: c.clientFactory,
|
||||||
|
credentials: c.credentials,
|
||||||
|
rand: c.rand,
|
||||||
|
|
||||||
|
pinned: 0,
|
||||||
|
endpoints: ceps,
|
||||||
|
}
|
||||||
|
|
||||||
|
mAPI := NewMembersAPI(clientCopy)
|
||||||
|
leader, err := mAPI.Leader(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if len(leader.ClientURLs) == 0 {
|
||||||
|
return "", ErrNoLeaderEndpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
|
||||||
if len(eps) == 0 {
|
if len(eps) == 0 {
|
||||||
return ErrNoEndpoints
|
return []url.URL{}, ErrNoEndpoints
|
||||||
}
|
}
|
||||||
|
|
||||||
neps := make([]url.URL, len(eps))
|
neps := make([]url.URL, len(eps))
|
||||||
for i, ep := range eps {
|
for i, ep := range eps {
|
||||||
u, err := url.Parse(ep)
|
u, err := url.Parse(ep)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return []url.URL{}, err
|
||||||
}
|
}
|
||||||
neps[i] = *u
|
neps[i] = *u
|
||||||
}
|
}
|
||||||
|
return neps, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *httpClusterClient) SetEndpoints(eps []string) error {
|
||||||
|
neps, err := c.parseEndpoints(eps)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Lock()
|
||||||
|
defer c.Unlock()
|
||||||
|
|
||||||
c.endpoints = shuffleEndpoints(c.rand, neps)
|
c.endpoints = shuffleEndpoints(c.rand, neps)
|
||||||
// TODO: pin old endpoint if possible, and rebalance when new endpoint appears
|
// We're not doing anything for PrioritizeLeader here. This is
|
||||||
|
// due to not having a context meaning we can't call getLeaderEndpoint
|
||||||
|
// However, if you're using PrioritizeLeader, you've already been told
|
||||||
|
// to regularly call sync, where we do have a ctx, and can figure the
|
||||||
|
// leader. PrioritizeLeader is also quite a loose guarantee, so deal
|
||||||
|
// with it
|
||||||
c.pinned = 0
|
c.pinned = 0
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -268,6 +358,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
|
||||||
var body []byte
|
var body []byte
|
||||||
var err error
|
var err error
|
||||||
cerr := &ClusterError{}
|
cerr := &ClusterError{}
|
||||||
|
isOneShot := ctx.Value(&oneShotCtxValue) != nil
|
||||||
|
|
||||||
for i := pinned; i < leps+pinned; i++ {
|
for i := pinned; i < leps+pinned; i++ {
|
||||||
k := i % leps
|
k := i % leps
|
||||||
|
@ -275,13 +366,13 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
|
||||||
resp, body, err = hc.Do(ctx, action)
|
resp, body, err = hc.Do(ctx, action)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cerr.Errors = append(cerr.Errors, err)
|
cerr.Errors = append(cerr.Errors, err)
|
||||||
// mask previous errors with context error, which is controlled by user
|
if err == ctx.Err() {
|
||||||
|
return nil, nil, ctx.Err()
|
||||||
|
}
|
||||||
if err == context.Canceled || err == context.DeadlineExceeded {
|
if err == context.Canceled || err == context.DeadlineExceeded {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
continue
|
} else if resp.StatusCode/100 == 5 {
|
||||||
}
|
|
||||||
if resp.StatusCode/100 == 5 {
|
|
||||||
switch resp.StatusCode {
|
switch resp.StatusCode {
|
||||||
case http.StatusInternalServerError, http.StatusServiceUnavailable:
|
case http.StatusInternalServerError, http.StatusServiceUnavailable:
|
||||||
// TODO: make sure this is a no leader response
|
// TODO: make sure this is a no leader response
|
||||||
|
@ -289,7 +380,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
|
||||||
default:
|
default:
|
||||||
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
|
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
|
||||||
}
|
}
|
||||||
continue
|
err = cerr.Errors[0]
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if !isOneShot {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c.Lock()
|
||||||
|
c.pinned = (k + 1) % leps
|
||||||
|
c.Unlock()
|
||||||
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
if k != pinned {
|
if k != pinned {
|
||||||
c.Lock()
|
c.Lock()
|
||||||
|
@ -321,27 +421,51 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
c.Lock()
|
var eps []string
|
||||||
defer c.Unlock()
|
|
||||||
|
|
||||||
eps := make([]string, 0)
|
|
||||||
for _, m := range ms {
|
for _, m := range ms {
|
||||||
eps = append(eps, m.ClientURLs...)
|
eps = append(eps, m.ClientURLs...)
|
||||||
}
|
}
|
||||||
sort.Sort(sort.StringSlice(eps))
|
|
||||||
|
|
||||||
ceps := make([]string, len(c.endpoints))
|
neps, err := c.parseEndpoints(eps)
|
||||||
for i, cep := range c.endpoints {
|
if err != nil {
|
||||||
ceps[i] = cep.String()
|
return err
|
||||||
}
|
|
||||||
sort.Sort(sort.StringSlice(ceps))
|
|
||||||
// fast path if no change happens
|
|
||||||
// this helps client to pin the endpoint when no cluster change
|
|
||||||
if reflect.DeepEqual(eps, ceps) {
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.reset(eps)
|
npin := 0
|
||||||
|
|
||||||
|
switch c.selectionMode {
|
||||||
|
case EndpointSelectionRandom:
|
||||||
|
c.RLock()
|
||||||
|
eq := endpointsEqual(c.endpoints, neps)
|
||||||
|
c.RUnlock()
|
||||||
|
|
||||||
|
if eq {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// When items in the endpoint list changes, we choose a new pin
|
||||||
|
neps = shuffleEndpoints(c.rand, neps)
|
||||||
|
case EndpointSelectionPrioritizeLeader:
|
||||||
|
nle, err := c.getLeaderEndpoint(ctx, neps)
|
||||||
|
if err != nil {
|
||||||
|
return ErrNoLeaderEndpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, n := range neps {
|
||||||
|
if n.String() == nle {
|
||||||
|
npin = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Lock()
|
||||||
|
defer c.Unlock()
|
||||||
|
c.endpoints = neps
|
||||||
|
c.pinned = npin
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
|
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
|
||||||
|
@ -360,6 +484,33 @@ func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
|
||||||
|
act := &getAction{Prefix: "/version"}
|
||||||
|
|
||||||
|
resp, body, err := c.Do(ctx, act)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch resp.StatusCode {
|
||||||
|
case http.StatusOK:
|
||||||
|
if len(body) == 0 {
|
||||||
|
return nil, ErrEmptyBody
|
||||||
|
}
|
||||||
|
var vresp version.Versions
|
||||||
|
if err := json.Unmarshal(body, &vresp); err != nil {
|
||||||
|
return nil, ErrInvalidJSON
|
||||||
|
}
|
||||||
|
return &vresp, nil
|
||||||
|
default:
|
||||||
|
var etcdErr Error
|
||||||
|
if err := json.Unmarshal(body, &etcdErr); err != nil {
|
||||||
|
return nil, ErrInvalidJSON
|
||||||
|
}
|
||||||
|
return nil, etcdErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
type roundTripResponse struct {
|
type roundTripResponse struct {
|
||||||
resp *http.Response
|
resp *http.Response
|
||||||
err error
|
err error
|
||||||
|
@ -378,9 +529,24 @@ func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Respon
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
hctx, hcancel := context.WithCancel(ctx)
|
isWait := false
|
||||||
if c.headerTimeout > 0 {
|
if req != nil && req.URL != nil {
|
||||||
|
ws := req.URL.Query().Get("wait")
|
||||||
|
if len(ws) != 0 {
|
||||||
|
var err error
|
||||||
|
isWait, err = strconv.ParseBool(ws)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var hctx context.Context
|
||||||
|
var hcancel context.CancelFunc
|
||||||
|
if !isWait && c.headerTimeout > 0 {
|
||||||
hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
|
hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
|
||||||
|
} else {
|
||||||
|
hctx, hcancel = context.WithCancel(ctx)
|
||||||
}
|
}
|
||||||
defer hcancel()
|
defer hcancel()
|
||||||
|
|
||||||
|
@ -512,3 +678,27 @@ func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
|
||||||
}
|
}
|
||||||
return neps
|
return neps
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func endpointsEqual(left, right []url.URL) bool {
|
||||||
|
if len(left) != len(right) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
sLeft := make([]string, len(left))
|
||||||
|
sRight := make([]string, len(right))
|
||||||
|
for i, l := range left {
|
||||||
|
sLeft[i] = l.String()
|
||||||
|
}
|
||||||
|
for i, r := range right {
|
||||||
|
sRight[i] = r.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(sLeft)
|
||||||
|
sort.Strings(sRight)
|
||||||
|
for i := range sLeft {
|
||||||
|
if sLeft[i] != sRight[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
8
vendor/github.com/coreos/etcd/client/cluster_error.go
generated
vendored
8
vendor/github.com/coreos/etcd/client/cluster_error.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
@ -21,7 +21,11 @@ type ClusterError struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ce *ClusterError) Error() string {
|
func (ce *ClusterError) Error() string {
|
||||||
return ErrClusterUnavailable.Error()
|
s := ErrClusterUnavailable.Error()
|
||||||
|
for i, e := range ce.Errors {
|
||||||
|
s += fmt.Sprintf("; error #%d: %s\n", i, e)
|
||||||
|
}
|
||||||
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ce *ClusterError) Detail() string {
|
func (ce *ClusterError) Detail() string {
|
||||||
|
|
2
vendor/github.com/coreos/etcd/client/curl.go
generated
vendored
2
vendor/github.com/coreos/etcd/client/curl.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
|
21
vendor/github.com/coreos/etcd/client/discover.go
generated
vendored
21
vendor/github.com/coreos/etcd/client/discover.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
@ -14,8 +14,27 @@
|
||||||
|
|
||||||
package client
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/coreos/etcd/pkg/srv"
|
||||||
|
)
|
||||||
|
|
||||||
// Discoverer is an interface that wraps the Discover method.
|
// Discoverer is an interface that wraps the Discover method.
|
||||||
type Discoverer interface {
|
type Discoverer interface {
|
||||||
// Discover looks up the etcd servers for the domain.
|
// Discover looks up the etcd servers for the domain.
|
||||||
Discover(domain string) ([]string, error)
|
Discover(domain string) ([]string, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type srvDiscover struct{}
|
||||||
|
|
||||||
|
// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
|
||||||
|
func NewSRVDiscover() Discoverer {
|
||||||
|
return &srvDiscover{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *srvDiscover) Discover(domain string) ([]string, error) {
|
||||||
|
srvs, err := srv.GetClient("etcd-client", domain)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return srvs.Endpoints, nil
|
||||||
|
}
|
||||||
|
|
4
vendor/github.com/coreos/etcd/client/doc.go
generated
vendored
4
vendor/github.com/coreos/etcd/client/doc.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
@ -34,6 +34,8 @@ Create a Config and exchange it for a Client:
|
||||||
// handle error
|
// handle error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Clients are safe for concurrent use by multiple goroutines.
|
||||||
|
|
||||||
Create a KeysAPI using the Client, then use it to interact with etcd:
|
Create a KeysAPI using the Client, then use it to interact with etcd:
|
||||||
|
|
||||||
kAPI := client.NewKeysAPI(c)
|
kAPI := client.NewKeysAPI(c)
|
||||||
|
|
675
vendor/github.com/coreos/etcd/client/keys.generated.go
generated
vendored
675
vendor/github.com/coreos/etcd/client/keys.generated.go
generated
vendored
File diff suppressed because it is too large
Load diff
57
vendor/github.com/coreos/etcd/client/keys.go
generated
vendored
57
vendor/github.com/coreos/etcd/client/keys.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
@ -106,7 +106,7 @@ type KeysAPI interface {
|
||||||
|
|
||||||
// Set assigns a new value to a Node identified by a given key. The caller
|
// Set assigns a new value to a Node identified by a given key. The caller
|
||||||
// may define a set of conditions in the SetOptions. If SetOptions.Dir=true
|
// may define a set of conditions in the SetOptions. If SetOptions.Dir=true
|
||||||
// than value is ignored.
|
// then value is ignored.
|
||||||
Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
|
Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
|
||||||
|
|
||||||
// Delete removes a Node identified by the given key, optionally destroying
|
// Delete removes a Node identified by the given key, optionally destroying
|
||||||
|
@ -184,8 +184,17 @@ type SetOptions struct {
|
||||||
// a TTL of 0.
|
// a TTL of 0.
|
||||||
TTL time.Duration
|
TTL time.Duration
|
||||||
|
|
||||||
|
// Refresh set to true means a TTL value can be updated
|
||||||
|
// without firing a watch or changing the node value. A
|
||||||
|
// value must not be provided when refreshing a key.
|
||||||
|
Refresh bool
|
||||||
|
|
||||||
// Dir specifies whether or not this Node should be created as a directory.
|
// Dir specifies whether or not this Node should be created as a directory.
|
||||||
Dir bool
|
Dir bool
|
||||||
|
|
||||||
|
// NoValueOnSuccess specifies whether the response contains the current value of the Node.
|
||||||
|
// If set, the response will only contain the current value when the request fails.
|
||||||
|
NoValueOnSuccess bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type GetOptions struct {
|
type GetOptions struct {
|
||||||
|
@ -234,7 +243,7 @@ type DeleteOptions struct {
|
||||||
|
|
||||||
type Watcher interface {
|
type Watcher interface {
|
||||||
// Next blocks until an etcd event occurs, then returns a Response
|
// Next blocks until an etcd event occurs, then returns a Response
|
||||||
// represeting that event. The behavior of Next depends on the
|
// representing that event. The behavior of Next depends on the
|
||||||
// WatcherOptions used to construct the Watcher. Next is designed to
|
// WatcherOptions used to construct the Watcher. Next is designed to
|
||||||
// be called repeatedly, each time blocking until a subsequent event
|
// be called repeatedly, each time blocking until a subsequent event
|
||||||
// is available.
|
// is available.
|
||||||
|
@ -263,6 +272,10 @@ type Response struct {
|
||||||
// Index holds the cluster-level index at the time the Response was generated.
|
// Index holds the cluster-level index at the time the Response was generated.
|
||||||
// This index is not tied to the Node(s) contained in this Response.
|
// This index is not tied to the Node(s) contained in this Response.
|
||||||
Index uint64 `json:"-"`
|
Index uint64 `json:"-"`
|
||||||
|
|
||||||
|
// ClusterID holds the cluster-level ID reported by the server. This
|
||||||
|
// should be different for different etcd clusters.
|
||||||
|
ClusterID string `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Node struct {
|
type Node struct {
|
||||||
|
@ -306,6 +319,7 @@ func (n *Node) TTLDuration() time.Duration {
|
||||||
type Nodes []*Node
|
type Nodes []*Node
|
||||||
|
|
||||||
// interfaces for sorting
|
// interfaces for sorting
|
||||||
|
|
||||||
func (ns Nodes) Len() int { return len(ns) }
|
func (ns Nodes) Len() int { return len(ns) }
|
||||||
func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
|
func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
|
||||||
func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
|
func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
|
||||||
|
@ -327,10 +341,16 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions
|
||||||
act.PrevIndex = opts.PrevIndex
|
act.PrevIndex = opts.PrevIndex
|
||||||
act.PrevExist = opts.PrevExist
|
act.PrevExist = opts.PrevExist
|
||||||
act.TTL = opts.TTL
|
act.TTL = opts.TTL
|
||||||
|
act.Refresh = opts.Refresh
|
||||||
act.Dir = opts.Dir
|
act.Dir = opts.Dir
|
||||||
|
act.NoValueOnSuccess = opts.NoValueOnSuccess
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, body, err := k.client.Do(ctx, act)
|
doCtx := ctx
|
||||||
|
if act.PrevExist == PrevNoExist {
|
||||||
|
doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
|
||||||
|
}
|
||||||
|
resp, body, err := k.client.Do(doCtx, act)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -378,7 +398,8 @@ func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOption
|
||||||
act.Recursive = opts.Recursive
|
act.Recursive = opts.Recursive
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, body, err := k.client.Do(ctx, act)
|
doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
|
||||||
|
resp, body, err := k.client.Do(doCtx, act)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -511,14 +532,16 @@ func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
|
||||||
}
|
}
|
||||||
|
|
||||||
type setAction struct {
|
type setAction struct {
|
||||||
Prefix string
|
Prefix string
|
||||||
Key string
|
Key string
|
||||||
Value string
|
Value string
|
||||||
PrevValue string
|
PrevValue string
|
||||||
PrevIndex uint64
|
PrevIndex uint64
|
||||||
PrevExist PrevExistType
|
PrevExist PrevExistType
|
||||||
TTL time.Duration
|
TTL time.Duration
|
||||||
Dir bool
|
Refresh bool
|
||||||
|
Dir bool
|
||||||
|
NoValueOnSuccess bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
|
func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
@ -549,6 +572,13 @@ func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
|
||||||
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
|
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if a.Refresh {
|
||||||
|
form.Add("refresh", "true")
|
||||||
|
}
|
||||||
|
if a.NoValueOnSuccess {
|
||||||
|
params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
|
||||||
|
}
|
||||||
|
|
||||||
u.RawQuery = params.Encode()
|
u.RawQuery = params.Encode()
|
||||||
body := strings.NewReader(form.Encode())
|
body := strings.NewReader(form.Encode())
|
||||||
|
|
||||||
|
@ -639,6 +669,7 @@ func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
res.ClusterID = header.Get("X-Etcd-Cluster-ID")
|
||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
34
vendor/github.com/coreos/etcd/client/members.go
generated
vendored
34
vendor/github.com/coreos/etcd/client/members.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
@ -29,6 +29,7 @@ import (
|
||||||
|
|
||||||
var (
|
var (
|
||||||
defaultV2MembersPrefix = "/v2/members"
|
defaultV2MembersPrefix = "/v2/members"
|
||||||
|
defaultLeaderSuffix = "/leader"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Member struct {
|
type Member struct {
|
||||||
|
@ -105,6 +106,9 @@ type MembersAPI interface {
|
||||||
|
|
||||||
// Update instructs etcd to update an existing Member in the cluster.
|
// Update instructs etcd to update an existing Member in the cluster.
|
||||||
Update(ctx context.Context, mID string, peerURLs []string) error
|
Update(ctx context.Context, mID string, peerURLs []string) error
|
||||||
|
|
||||||
|
// Leader gets current leader of the cluster
|
||||||
|
Leader(ctx context.Context) (*Member, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type httpMembersAPI struct {
|
type httpMembersAPI struct {
|
||||||
|
@ -199,6 +203,25 @@ func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
|
||||||
return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
|
return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
|
||||||
|
req := &membersAPIActionLeader{}
|
||||||
|
resp, body, err := m.client.Do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var leader Member
|
||||||
|
if err := json.Unmarshal(body, &leader); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &leader, nil
|
||||||
|
}
|
||||||
|
|
||||||
type membersAPIActionList struct{}
|
type membersAPIActionList struct{}
|
||||||
|
|
||||||
func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
|
func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
@ -255,6 +278,15 @@ func assertStatusCode(got int, want ...int) (err error) {
|
||||||
return fmt.Errorf("unexpected status code %d", got)
|
return fmt.Errorf("unexpected status code %d", got)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type membersAPIActionLeader struct{}
|
||||||
|
|
||||||
|
func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2MembersURL(ep)
|
||||||
|
u.Path = path.Join(u.Path, defaultLeaderSuffix)
|
||||||
|
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
// v2MembersURL add the necessary path to the provided endpoint
|
// v2MembersURL add the necessary path to the provided endpoint
|
||||||
// to route requests to the default v2 members API.
|
// to route requests to the default v2 members API.
|
||||||
func v2MembersURL(ep url.URL) *url.URL {
|
func v2MembersURL(ep url.URL) *url.URL {
|
||||||
|
|
65
vendor/github.com/coreos/etcd/client/srv.go
generated
vendored
65
vendor/github.com/coreos/etcd/client/srv.go
generated
vendored
|
@ -1,65 +0,0 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package client
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"net/url"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// indirection for testing
|
|
||||||
lookupSRV = net.LookupSRV
|
|
||||||
)
|
|
||||||
|
|
||||||
type srvDiscover struct{}
|
|
||||||
|
|
||||||
// NewSRVDiscover constructs a new Dicoverer that uses the stdlib to lookup SRV records.
|
|
||||||
func NewSRVDiscover() Discoverer {
|
|
||||||
return &srvDiscover{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Discover looks up the etcd servers for the domain.
|
|
||||||
func (d *srvDiscover) Discover(domain string) ([]string, error) {
|
|
||||||
var urls []*url.URL
|
|
||||||
|
|
||||||
updateURLs := func(service, scheme string) error {
|
|
||||||
_, addrs, err := lookupSRV(service, "tcp", domain)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, srv := range addrs {
|
|
||||||
urls = append(urls, &url.URL{
|
|
||||||
Scheme: scheme,
|
|
||||||
Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
errHTTPS := updateURLs("etcd-server-ssl", "https")
|
|
||||||
errHTTP := updateURLs("etcd-server", "http")
|
|
||||||
|
|
||||||
if errHTTPS != nil && errHTTP != nil {
|
|
||||||
return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
|
|
||||||
}
|
|
||||||
|
|
||||||
endpoints := make([]string, len(urls))
|
|
||||||
for i := range urls {
|
|
||||||
endpoints[i] = urls[i].String()
|
|
||||||
}
|
|
||||||
return endpoints, nil
|
|
||||||
}
|
|
53
vendor/github.com/coreos/etcd/client/util.go
generated
vendored
Normal file
53
vendor/github.com/coreos/etcd/client/util.go
generated
vendored
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
roleNotFoundRegExp *regexp.Regexp
|
||||||
|
userNotFoundRegExp *regexp.Regexp
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
|
||||||
|
userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
|
||||||
|
func IsKeyNotFound(err error) bool {
|
||||||
|
if cErr, ok := err.(Error); ok {
|
||||||
|
return cErr.Code == ErrorCodeKeyNotFound
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsRoleNotFound returns true if the error means role not found of v2 API.
|
||||||
|
func IsRoleNotFound(err error) bool {
|
||||||
|
if ae, ok := err.(authError); ok {
|
||||||
|
return roleNotFoundRegExp.MatchString(ae.Message)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsUserNotFound returns true if the error means user not found of v2 API.
|
||||||
|
func IsUserNotFound(err error) bool {
|
||||||
|
if ae, ok := err.(authError); ok {
|
||||||
|
return userNotFoundRegExp.MatchString(ae.Message)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
222
vendor/github.com/coreos/etcd/clientv3/auth.go
generated
vendored
Normal file
222
vendor/github.com/coreos/etcd/clientv3/auth.go
generated
vendored
Normal file
|
@ -0,0 +1,222 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/auth/authpb"
|
||||||
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
AuthEnableResponse pb.AuthEnableResponse
|
||||||
|
AuthDisableResponse pb.AuthDisableResponse
|
||||||
|
AuthenticateResponse pb.AuthenticateResponse
|
||||||
|
AuthUserAddResponse pb.AuthUserAddResponse
|
||||||
|
AuthUserDeleteResponse pb.AuthUserDeleteResponse
|
||||||
|
AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
|
||||||
|
AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
|
||||||
|
AuthUserGetResponse pb.AuthUserGetResponse
|
||||||
|
AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
|
||||||
|
AuthRoleAddResponse pb.AuthRoleAddResponse
|
||||||
|
AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
|
||||||
|
AuthRoleGetResponse pb.AuthRoleGetResponse
|
||||||
|
AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
|
||||||
|
AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
|
||||||
|
AuthUserListResponse pb.AuthUserListResponse
|
||||||
|
AuthRoleListResponse pb.AuthRoleListResponse
|
||||||
|
|
||||||
|
PermissionType authpb.Permission_Type
|
||||||
|
Permission authpb.Permission
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
PermRead = authpb.READ
|
||||||
|
PermWrite = authpb.WRITE
|
||||||
|
PermReadWrite = authpb.READWRITE
|
||||||
|
)
|
||||||
|
|
||||||
|
type Auth interface {
|
||||||
|
// AuthEnable enables auth of an etcd cluster.
|
||||||
|
AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
|
||||||
|
|
||||||
|
// AuthDisable disables auth of an etcd cluster.
|
||||||
|
AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
|
||||||
|
|
||||||
|
// UserAdd adds a new user to an etcd cluster.
|
||||||
|
UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
|
||||||
|
|
||||||
|
// UserDelete deletes a user from an etcd cluster.
|
||||||
|
UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
|
||||||
|
|
||||||
|
// UserChangePassword changes a password of a user.
|
||||||
|
UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
|
||||||
|
|
||||||
|
// UserGrantRole grants a role to a user.
|
||||||
|
UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
|
||||||
|
|
||||||
|
// UserGet gets a detailed information of a user.
|
||||||
|
UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
|
||||||
|
|
||||||
|
// UserList gets a list of all users.
|
||||||
|
UserList(ctx context.Context) (*AuthUserListResponse, error)
|
||||||
|
|
||||||
|
// UserRevokeRole revokes a role of a user.
|
||||||
|
UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
|
||||||
|
|
||||||
|
// RoleAdd adds a new role to an etcd cluster.
|
||||||
|
RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
|
||||||
|
|
||||||
|
// RoleGrantPermission grants a permission to a role.
|
||||||
|
RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
|
||||||
|
|
||||||
|
// RoleGet gets a detailed information of a role.
|
||||||
|
RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
|
||||||
|
|
||||||
|
// RoleList gets a list of all roles.
|
||||||
|
RoleList(ctx context.Context) (*AuthRoleListResponse, error)
|
||||||
|
|
||||||
|
// RoleRevokePermission revokes a permission from a role.
|
||||||
|
RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
|
||||||
|
|
||||||
|
// RoleDelete deletes a role.
|
||||||
|
RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type auth struct {
|
||||||
|
remote pb.AuthClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAuth(c *Client) Auth {
|
||||||
|
return &auth{remote: pb.NewAuthClient(c.ActiveConnection())}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
|
||||||
|
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
|
||||||
|
return (*AuthEnableResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
|
||||||
|
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
|
||||||
|
return (*AuthDisableResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
|
||||||
|
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password})
|
||||||
|
return (*AuthUserAddResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
|
||||||
|
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name})
|
||||||
|
return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
|
||||||
|
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password})
|
||||||
|
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
|
||||||
|
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role})
|
||||||
|
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
|
||||||
|
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
|
||||||
|
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
|
||||||
|
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
|
||||||
|
return (*AuthUserListResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
|
||||||
|
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role})
|
||||||
|
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
|
||||||
|
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name})
|
||||||
|
return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
|
||||||
|
perm := &authpb.Permission{
|
||||||
|
Key: []byte(key),
|
||||||
|
RangeEnd: []byte(rangeEnd),
|
||||||
|
PermType: authpb.Permission_Type(permType),
|
||||||
|
}
|
||||||
|
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm})
|
||||||
|
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
|
||||||
|
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
|
||||||
|
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
|
||||||
|
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
|
||||||
|
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
|
||||||
|
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd})
|
||||||
|
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
|
||||||
|
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role})
|
||||||
|
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StrToPermissionType(s string) (PermissionType, error) {
|
||||||
|
val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
|
||||||
|
if ok {
|
||||||
|
return PermissionType(val), nil
|
||||||
|
}
|
||||||
|
return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
type authenticator struct {
|
||||||
|
conn *grpc.ClientConn // conn in-use
|
||||||
|
remote pb.AuthClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
|
||||||
|
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
|
||||||
|
return (*AuthenticateResponse)(resp), toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *authenticator) close() {
|
||||||
|
auth.conn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) {
|
||||||
|
conn, err := grpc.Dial(endpoint, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &authenticator{
|
||||||
|
conn: conn,
|
||||||
|
remote: pb.NewAuthClient(conn),
|
||||||
|
}, nil
|
||||||
|
}
|
356
vendor/github.com/coreos/etcd/clientv3/balancer.go
generated
vendored
Normal file
356
vendor/github.com/coreos/etcd/clientv3/balancer.go
generated
vendored
Normal file
|
@ -0,0 +1,356 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
|
||||||
|
// any active connection to endpoints at the time.
|
||||||
|
// This error is returned only when opts.BlockingWait is true.
|
||||||
|
var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
|
||||||
|
|
||||||
|
// simpleBalancer does the bare minimum to expose multiple eps
|
||||||
|
// to the grpc reconnection code path
|
||||||
|
type simpleBalancer struct {
|
||||||
|
// addrs are the client's endpoints for grpc
|
||||||
|
addrs []grpc.Address
|
||||||
|
// notifyCh notifies grpc of the set of addresses for connecting
|
||||||
|
notifyCh chan []grpc.Address
|
||||||
|
|
||||||
|
// readyc closes once the first connection is up
|
||||||
|
readyc chan struct{}
|
||||||
|
readyOnce sync.Once
|
||||||
|
|
||||||
|
// mu protects upEps, pinAddr, and connectingAddr
|
||||||
|
mu sync.RWMutex
|
||||||
|
|
||||||
|
// upc closes when upEps transitions from empty to non-zero or the balancer closes.
|
||||||
|
upc chan struct{}
|
||||||
|
|
||||||
|
// downc closes when grpc calls down() on pinAddr
|
||||||
|
downc chan struct{}
|
||||||
|
|
||||||
|
// stopc is closed to signal updateNotifyLoop should stop.
|
||||||
|
stopc chan struct{}
|
||||||
|
|
||||||
|
// donec closes when all goroutines are exited
|
||||||
|
donec chan struct{}
|
||||||
|
|
||||||
|
// updateAddrsC notifies updateNotifyLoop to update addrs.
|
||||||
|
updateAddrsC chan struct{}
|
||||||
|
|
||||||
|
// grpc issues TLS cert checks using the string passed into dial so
|
||||||
|
// that string must be the host. To recover the full scheme://host URL,
|
||||||
|
// have a map from hosts to the original endpoint.
|
||||||
|
host2ep map[string]string
|
||||||
|
|
||||||
|
// pinAddr is the currently pinned address; set to the empty string on
|
||||||
|
// intialization and shutdown.
|
||||||
|
pinAddr string
|
||||||
|
|
||||||
|
closed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSimpleBalancer(eps []string) *simpleBalancer {
|
||||||
|
notifyCh := make(chan []grpc.Address, 1)
|
||||||
|
addrs := make([]grpc.Address, len(eps))
|
||||||
|
for i := range eps {
|
||||||
|
addrs[i].Addr = getHost(eps[i])
|
||||||
|
}
|
||||||
|
sb := &simpleBalancer{
|
||||||
|
addrs: addrs,
|
||||||
|
notifyCh: notifyCh,
|
||||||
|
readyc: make(chan struct{}),
|
||||||
|
upc: make(chan struct{}),
|
||||||
|
stopc: make(chan struct{}),
|
||||||
|
downc: make(chan struct{}),
|
||||||
|
donec: make(chan struct{}),
|
||||||
|
updateAddrsC: make(chan struct{}, 1),
|
||||||
|
host2ep: getHost2ep(eps),
|
||||||
|
}
|
||||||
|
close(sb.downc)
|
||||||
|
go sb.updateNotifyLoop()
|
||||||
|
return sb
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
|
||||||
|
|
||||||
|
func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
return b.upc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *simpleBalancer) getEndpoint(host string) string {
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
return b.host2ep[host]
|
||||||
|
}
|
||||||
|
|
||||||
|
func getHost2ep(eps []string) map[string]string {
|
||||||
|
hm := make(map[string]string, len(eps))
|
||||||
|
for i := range eps {
|
||||||
|
_, host, _ := parseEndpoint(eps[i])
|
||||||
|
hm[host] = eps[i]
|
||||||
|
}
|
||||||
|
return hm
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *simpleBalancer) updateAddrs(eps []string) {
|
||||||
|
np := getHost2ep(eps)
|
||||||
|
|
||||||
|
b.mu.Lock()
|
||||||
|
|
||||||
|
match := len(np) == len(b.host2ep)
|
||||||
|
for k, v := range np {
|
||||||
|
if b.host2ep[k] != v {
|
||||||
|
match = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if match {
|
||||||
|
// same endpoints, so no need to update address
|
||||||
|
b.mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
b.host2ep = np
|
||||||
|
|
||||||
|
addrs := make([]grpc.Address, 0, len(eps))
|
||||||
|
for i := range eps {
|
||||||
|
addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
|
||||||
|
}
|
||||||
|
b.addrs = addrs
|
||||||
|
|
||||||
|
// updating notifyCh can trigger new connections,
|
||||||
|
// only update addrs if all connections are down
|
||||||
|
// or addrs does not include pinAddr.
|
||||||
|
update := !hasAddr(addrs, b.pinAddr)
|
||||||
|
b.mu.Unlock()
|
||||||
|
|
||||||
|
if update {
|
||||||
|
select {
|
||||||
|
case b.updateAddrsC <- struct{}{}:
|
||||||
|
case <-b.stopc:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasAddr(addrs []grpc.Address, targetAddr string) bool {
|
||||||
|
for _, addr := range addrs {
|
||||||
|
if targetAddr == addr.Addr {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *simpleBalancer) updateNotifyLoop() {
|
||||||
|
defer close(b.donec)
|
||||||
|
|
||||||
|
for {
|
||||||
|
b.mu.RLock()
|
||||||
|
upc, downc, addr := b.upc, b.downc, b.pinAddr
|
||||||
|
b.mu.RUnlock()
|
||||||
|
// downc or upc should be closed
|
||||||
|
select {
|
||||||
|
case <-downc:
|
||||||
|
downc = nil
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-upc:
|
||||||
|
upc = nil
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case downc == nil && upc == nil:
|
||||||
|
// stale
|
||||||
|
select {
|
||||||
|
case <-b.stopc:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
case downc == nil:
|
||||||
|
b.notifyAddrs()
|
||||||
|
select {
|
||||||
|
case <-upc:
|
||||||
|
case <-b.updateAddrsC:
|
||||||
|
b.notifyAddrs()
|
||||||
|
case <-b.stopc:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case upc == nil:
|
||||||
|
select {
|
||||||
|
// close connections that are not the pinned address
|
||||||
|
case b.notifyCh <- []grpc.Address{{Addr: addr}}:
|
||||||
|
case <-downc:
|
||||||
|
case <-b.stopc:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-downc:
|
||||||
|
case <-b.updateAddrsC:
|
||||||
|
case <-b.stopc:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b.notifyAddrs()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *simpleBalancer) notifyAddrs() {
|
||||||
|
b.mu.RLock()
|
||||||
|
addrs := b.addrs
|
||||||
|
b.mu.RUnlock()
|
||||||
|
select {
|
||||||
|
case b.notifyCh <- addrs:
|
||||||
|
case <-b.stopc:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
|
// gRPC might call Up after it called Close. We add this check
|
||||||
|
// to "fix" it up at application layer. Or our simplerBalancer
|
||||||
|
// might panic since b.upc is closed.
|
||||||
|
if b.closed {
|
||||||
|
return func(err error) {}
|
||||||
|
}
|
||||||
|
// gRPC might call Up on a stale address.
|
||||||
|
// Prevent updating pinAddr with a stale address.
|
||||||
|
if !hasAddr(b.addrs, addr.Addr) {
|
||||||
|
return func(err error) {}
|
||||||
|
}
|
||||||
|
if b.pinAddr != "" {
|
||||||
|
return func(err error) {}
|
||||||
|
}
|
||||||
|
// notify waiting Get()s and pin first connected address
|
||||||
|
close(b.upc)
|
||||||
|
b.downc = make(chan struct{})
|
||||||
|
b.pinAddr = addr.Addr
|
||||||
|
// notify client that a connection is up
|
||||||
|
b.readyOnce.Do(func() { close(b.readyc) })
|
||||||
|
return func(err error) {
|
||||||
|
b.mu.Lock()
|
||||||
|
b.upc = make(chan struct{})
|
||||||
|
close(b.downc)
|
||||||
|
b.pinAddr = ""
|
||||||
|
b.mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
|
||||||
|
var (
|
||||||
|
addr string
|
||||||
|
closed bool
|
||||||
|
)
|
||||||
|
|
||||||
|
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
|
||||||
|
// an address it has notified via Notify immediately instead of blocking.
|
||||||
|
if !opts.BlockingWait {
|
||||||
|
b.mu.RLock()
|
||||||
|
closed = b.closed
|
||||||
|
addr = b.pinAddr
|
||||||
|
b.mu.RUnlock()
|
||||||
|
if closed {
|
||||||
|
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||||
|
}
|
||||||
|
if addr == "" {
|
||||||
|
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
|
||||||
|
}
|
||||||
|
return grpc.Address{Addr: addr}, func() {}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
b.mu.RLock()
|
||||||
|
ch := b.upc
|
||||||
|
b.mu.RUnlock()
|
||||||
|
select {
|
||||||
|
case <-ch:
|
||||||
|
case <-b.donec:
|
||||||
|
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||||
|
case <-ctx.Done():
|
||||||
|
return grpc.Address{Addr: ""}, nil, ctx.Err()
|
||||||
|
}
|
||||||
|
b.mu.RLock()
|
||||||
|
closed = b.closed
|
||||||
|
addr = b.pinAddr
|
||||||
|
b.mu.RUnlock()
|
||||||
|
// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
|
||||||
|
if closed {
|
||||||
|
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||||
|
}
|
||||||
|
if addr != "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return grpc.Address{Addr: addr}, func() {}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
|
||||||
|
|
||||||
|
func (b *simpleBalancer) Close() error {
|
||||||
|
b.mu.Lock()
|
||||||
|
// In case gRPC calls close twice. TODO: remove the checking
|
||||||
|
// when we are sure that gRPC wont call close twice.
|
||||||
|
if b.closed {
|
||||||
|
b.mu.Unlock()
|
||||||
|
<-b.donec
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
b.closed = true
|
||||||
|
close(b.stopc)
|
||||||
|
b.pinAddr = ""
|
||||||
|
|
||||||
|
// In the case of following scenario:
|
||||||
|
// 1. upc is not closed; no pinned address
|
||||||
|
// 2. client issues an rpc, calling invoke(), which calls Get(), enters for loop, blocks
|
||||||
|
// 3. clientconn.Close() calls balancer.Close(); closed = true
|
||||||
|
// 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
|
||||||
|
// we must close upc so Get() exits from blocking on upc
|
||||||
|
select {
|
||||||
|
case <-b.upc:
|
||||||
|
default:
|
||||||
|
// terminate all waiting Get()s
|
||||||
|
close(b.upc)
|
||||||
|
}
|
||||||
|
|
||||||
|
b.mu.Unlock()
|
||||||
|
|
||||||
|
// wait for updateNotifyLoop to finish
|
||||||
|
<-b.donec
|
||||||
|
close(b.notifyCh)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getHost(ep string) string {
|
||||||
|
url, uerr := url.Parse(ep)
|
||||||
|
if uerr != nil || !strings.Contains(ep, "://") {
|
||||||
|
return ep
|
||||||
|
}
|
||||||
|
return url.Host
|
||||||
|
}
|
515
vendor/github.com/coreos/etcd/clientv3/client.go
generated
vendored
Normal file
515
vendor/github.com/coreos/etcd/clientv3/client.go
generated
vendored
Normal file
|
@ -0,0 +1,515 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
|
||||||
|
ErrOldCluster = errors.New("etcdclient: old cluster version")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Client provides and manages an etcd v3 client session.
|
||||||
|
type Client struct {
|
||||||
|
Cluster
|
||||||
|
KV
|
||||||
|
Lease
|
||||||
|
Watcher
|
||||||
|
Auth
|
||||||
|
Maintenance
|
||||||
|
|
||||||
|
conn *grpc.ClientConn
|
||||||
|
dialerrc chan error
|
||||||
|
|
||||||
|
cfg Config
|
||||||
|
creds *credentials.TransportCredentials
|
||||||
|
balancer *simpleBalancer
|
||||||
|
retryWrapper retryRpcFunc
|
||||||
|
retryAuthWrapper retryRpcFunc
|
||||||
|
|
||||||
|
ctx context.Context
|
||||||
|
cancel context.CancelFunc
|
||||||
|
|
||||||
|
// Username is a username for authentication
|
||||||
|
Username string
|
||||||
|
// Password is a password for authentication
|
||||||
|
Password string
|
||||||
|
// tokenCred is an instance of WithPerRPCCredentials()'s argument
|
||||||
|
tokenCred *authTokenCredential
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new etcdv3 client from a given configuration.
|
||||||
|
func New(cfg Config) (*Client, error) {
|
||||||
|
if len(cfg.Endpoints) == 0 {
|
||||||
|
return nil, ErrNoAvailableEndpoints
|
||||||
|
}
|
||||||
|
|
||||||
|
return newClient(&cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCtxClient creates a client with a context but no underlying grpc
|
||||||
|
// connection. This is useful for embedded cases that override the
|
||||||
|
// service interface implementations and do not need connection management.
|
||||||
|
func NewCtxClient(ctx context.Context) *Client {
|
||||||
|
cctx, cancel := context.WithCancel(ctx)
|
||||||
|
return &Client{ctx: cctx, cancel: cancel}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFromURL creates a new etcdv3 client from a URL.
|
||||||
|
func NewFromURL(url string) (*Client, error) {
|
||||||
|
return New(Config{Endpoints: []string{url}})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close shuts down the client's etcd connections.
|
||||||
|
func (c *Client) Close() error {
|
||||||
|
c.cancel()
|
||||||
|
c.Watcher.Close()
|
||||||
|
c.Lease.Close()
|
||||||
|
if c.conn != nil {
|
||||||
|
return toErr(c.ctx, c.conn.Close())
|
||||||
|
}
|
||||||
|
return c.ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ctx is a context for "out of band" messages (e.g., for sending
|
||||||
|
// "clean up" message when another context is canceled). It is
|
||||||
|
// canceled on client Close().
|
||||||
|
func (c *Client) Ctx() context.Context { return c.ctx }
|
||||||
|
|
||||||
|
// Endpoints lists the registered endpoints for the client.
|
||||||
|
func (c *Client) Endpoints() (eps []string) {
|
||||||
|
// copy the slice; protect original endpoints from being changed
|
||||||
|
eps = make([]string, len(c.cfg.Endpoints))
|
||||||
|
copy(eps, c.cfg.Endpoints)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetEndpoints updates client's endpoints.
|
||||||
|
func (c *Client) SetEndpoints(eps ...string) {
|
||||||
|
c.cfg.Endpoints = eps
|
||||||
|
c.balancer.updateAddrs(eps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
|
||||||
|
func (c *Client) Sync(ctx context.Context) error {
|
||||||
|
mresp, err := c.MemberList(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var eps []string
|
||||||
|
for _, m := range mresp.Members {
|
||||||
|
eps = append(eps, m.ClientURLs...)
|
||||||
|
}
|
||||||
|
c.SetEndpoints(eps...)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) autoSync() {
|
||||||
|
if c.cfg.AutoSyncInterval == time.Duration(0) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-c.ctx.Done():
|
||||||
|
return
|
||||||
|
case <-time.After(c.cfg.AutoSyncInterval):
|
||||||
|
ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
|
||||||
|
if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
|
||||||
|
logger.Println("Auto sync endpoints failed:", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type authTokenCredential struct {
|
||||||
|
token string
|
||||||
|
tokenMu *sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cred authTokenCredential) RequireTransportSecurity() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
|
||||||
|
cred.tokenMu.RLock()
|
||||||
|
defer cred.tokenMu.RUnlock()
|
||||||
|
return map[string]string{
|
||||||
|
"token": cred.token,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
|
||||||
|
proto = "tcp"
|
||||||
|
host = endpoint
|
||||||
|
url, uerr := url.Parse(endpoint)
|
||||||
|
if uerr != nil || !strings.Contains(endpoint, "://") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
scheme = url.Scheme
|
||||||
|
|
||||||
|
// strip scheme:// prefix since grpc dials by host
|
||||||
|
host = url.Host
|
||||||
|
switch url.Scheme {
|
||||||
|
case "http", "https":
|
||||||
|
case "unix", "unixs":
|
||||||
|
proto = "unix"
|
||||||
|
host = url.Host + url.Path
|
||||||
|
default:
|
||||||
|
proto, host = "", ""
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
|
||||||
|
creds = c.creds
|
||||||
|
switch scheme {
|
||||||
|
case "unix":
|
||||||
|
case "http":
|
||||||
|
creds = nil
|
||||||
|
case "https", "unixs":
|
||||||
|
if creds != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
tlsconfig := &tls.Config{}
|
||||||
|
emptyCreds := credentials.NewTLS(tlsconfig)
|
||||||
|
creds = &emptyCreds
|
||||||
|
default:
|
||||||
|
creds = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// dialSetupOpts gives the dial opts prior to any authentication
|
||||||
|
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
|
||||||
|
if c.cfg.DialTimeout > 0 {
|
||||||
|
opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
|
||||||
|
}
|
||||||
|
opts = append(opts, dopts...)
|
||||||
|
|
||||||
|
f := func(host string, t time.Duration) (net.Conn, error) {
|
||||||
|
proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
|
||||||
|
if host == "" && endpoint != "" {
|
||||||
|
// dialing an endpoint not in the balancer; use
|
||||||
|
// endpoint passed into dial
|
||||||
|
proto, host, _ = parseEndpoint(endpoint)
|
||||||
|
}
|
||||||
|
if proto == "" {
|
||||||
|
return nil, fmt.Errorf("unknown scheme for %q", host)
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-c.ctx.Done():
|
||||||
|
return nil, c.ctx.Err()
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
dialer := &net.Dialer{Timeout: t}
|
||||||
|
conn, err := dialer.DialContext(c.ctx, proto, host)
|
||||||
|
if err != nil {
|
||||||
|
select {
|
||||||
|
case c.dialerrc <- err:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return conn, err
|
||||||
|
}
|
||||||
|
opts = append(opts, grpc.WithDialer(f))
|
||||||
|
|
||||||
|
creds := c.creds
|
||||||
|
if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
|
||||||
|
creds = c.processCreds(scheme)
|
||||||
|
}
|
||||||
|
if creds != nil {
|
||||||
|
opts = append(opts, grpc.WithTransportCredentials(*creds))
|
||||||
|
} else {
|
||||||
|
opts = append(opts, grpc.WithInsecure())
|
||||||
|
}
|
||||||
|
|
||||||
|
return opts
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dial connects to a single endpoint using the client's config.
|
||||||
|
func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
|
||||||
|
return c.dial(endpoint)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) getToken(ctx context.Context) error {
|
||||||
|
var err error // return last error in a case of fail
|
||||||
|
var auth *authenticator
|
||||||
|
|
||||||
|
for i := 0; i < len(c.cfg.Endpoints); i++ {
|
||||||
|
endpoint := c.cfg.Endpoints[i]
|
||||||
|
host := getHost(endpoint)
|
||||||
|
// use dial options without dopts to avoid reusing the client balancer
|
||||||
|
auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint))
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
defer auth.close()
|
||||||
|
|
||||||
|
var resp *AuthenticateResponse
|
||||||
|
resp, err = auth.authenticate(ctx, c.Username, c.Password)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
c.tokenCred.tokenMu.Lock()
|
||||||
|
c.tokenCred.token = resp.Token
|
||||||
|
c.tokenCred.tokenMu.Unlock()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
|
||||||
|
opts := c.dialSetupOpts(endpoint, dopts...)
|
||||||
|
host := getHost(endpoint)
|
||||||
|
if c.Username != "" && c.Password != "" {
|
||||||
|
c.tokenCred = &authTokenCredential{
|
||||||
|
tokenMu: &sync.RWMutex{},
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := c.ctx
|
||||||
|
if c.cfg.DialTimeout > 0 {
|
||||||
|
cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout)
|
||||||
|
defer cancel()
|
||||||
|
ctx = cctx
|
||||||
|
}
|
||||||
|
|
||||||
|
err := c.getToken(ctx)
|
||||||
|
if err != nil {
|
||||||
|
if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
|
||||||
|
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
|
||||||
|
err = grpc.ErrClientConnTimeout
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
opts = append(opts, c.cfg.DialOptions...)
|
||||||
|
|
||||||
|
conn, err := grpc.DialContext(c.ctx, host, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return conn, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRequireLeader requires client requests to only succeed
|
||||||
|
// when the cluster has a leader.
|
||||||
|
func WithRequireLeader(ctx context.Context) context.Context {
|
||||||
|
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
|
||||||
|
return metadata.NewContext(ctx, md)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newClient(cfg *Config) (*Client, error) {
|
||||||
|
if cfg == nil {
|
||||||
|
cfg = &Config{}
|
||||||
|
}
|
||||||
|
var creds *credentials.TransportCredentials
|
||||||
|
if cfg.TLS != nil {
|
||||||
|
c := credentials.NewTLS(cfg.TLS)
|
||||||
|
creds = &c
|
||||||
|
}
|
||||||
|
|
||||||
|
// use a temporary skeleton client to bootstrap first connection
|
||||||
|
baseCtx := context.TODO()
|
||||||
|
if cfg.Context != nil {
|
||||||
|
baseCtx = cfg.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(baseCtx)
|
||||||
|
client := &Client{
|
||||||
|
conn: nil,
|
||||||
|
dialerrc: make(chan error, 1),
|
||||||
|
cfg: *cfg,
|
||||||
|
creds: creds,
|
||||||
|
ctx: ctx,
|
||||||
|
cancel: cancel,
|
||||||
|
}
|
||||||
|
if cfg.Username != "" && cfg.Password != "" {
|
||||||
|
client.Username = cfg.Username
|
||||||
|
client.Password = cfg.Password
|
||||||
|
}
|
||||||
|
|
||||||
|
client.balancer = newSimpleBalancer(cfg.Endpoints)
|
||||||
|
// use Endpoints[0] so that for https:// without any tls config given, then
|
||||||
|
// grpc will assume the ServerName is in the endpoint.
|
||||||
|
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
|
||||||
|
if err != nil {
|
||||||
|
client.cancel()
|
||||||
|
client.balancer.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
client.conn = conn
|
||||||
|
client.retryWrapper = client.newRetryWrapper()
|
||||||
|
client.retryAuthWrapper = client.newAuthRetryWrapper()
|
||||||
|
|
||||||
|
// wait for a connection
|
||||||
|
if cfg.DialTimeout > 0 {
|
||||||
|
hasConn := false
|
||||||
|
waitc := time.After(cfg.DialTimeout)
|
||||||
|
select {
|
||||||
|
case <-client.balancer.readyc:
|
||||||
|
hasConn = true
|
||||||
|
case <-ctx.Done():
|
||||||
|
case <-waitc:
|
||||||
|
}
|
||||||
|
if !hasConn {
|
||||||
|
err := grpc.ErrClientConnTimeout
|
||||||
|
select {
|
||||||
|
case err = <-client.dialerrc:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
client.cancel()
|
||||||
|
client.balancer.Close()
|
||||||
|
conn.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
client.Cluster = NewCluster(client)
|
||||||
|
client.KV = NewKV(client)
|
||||||
|
client.Lease = NewLease(client)
|
||||||
|
client.Watcher = NewWatcher(client)
|
||||||
|
client.Auth = NewAuth(client)
|
||||||
|
client.Maintenance = NewMaintenance(client)
|
||||||
|
|
||||||
|
if cfg.RejectOldCluster {
|
||||||
|
if err := client.checkVersion(); err != nil {
|
||||||
|
client.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
go client.autoSync()
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) checkVersion() (err error) {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
errc := make(chan error, len(c.cfg.Endpoints))
|
||||||
|
ctx, cancel := context.WithCancel(c.ctx)
|
||||||
|
if c.cfg.DialTimeout > 0 {
|
||||||
|
ctx, _ = context.WithTimeout(ctx, c.cfg.DialTimeout)
|
||||||
|
}
|
||||||
|
wg.Add(len(c.cfg.Endpoints))
|
||||||
|
for _, ep := range c.cfg.Endpoints {
|
||||||
|
// if cluster is current, any endpoint gives a recent version
|
||||||
|
go func(e string) {
|
||||||
|
defer wg.Done()
|
||||||
|
resp, rerr := c.Status(ctx, e)
|
||||||
|
if rerr != nil {
|
||||||
|
errc <- rerr
|
||||||
|
return
|
||||||
|
}
|
||||||
|
vs := strings.Split(resp.Version, ".")
|
||||||
|
maj, min := 0, 0
|
||||||
|
if len(vs) >= 2 {
|
||||||
|
maj, rerr = strconv.Atoi(vs[0])
|
||||||
|
min, rerr = strconv.Atoi(vs[1])
|
||||||
|
}
|
||||||
|
if maj < 3 || (maj == 3 && min < 2) {
|
||||||
|
rerr = ErrOldCluster
|
||||||
|
}
|
||||||
|
errc <- rerr
|
||||||
|
}(ep)
|
||||||
|
}
|
||||||
|
// wait for success
|
||||||
|
for i := 0; i < len(c.cfg.Endpoints); i++ {
|
||||||
|
if err = <-errc; err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ActiveConnection returns the current in-use connection
|
||||||
|
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
|
||||||
|
|
||||||
|
// isHaltErr returns true if the given error and context indicate no forward
|
||||||
|
// progress can be made, even after reconnecting.
|
||||||
|
func isHaltErr(ctx context.Context, err error) bool {
|
||||||
|
if ctx != nil && ctx.Err() != nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
code := grpc.Code(err)
|
||||||
|
// Unavailable codes mean the system will be right back.
|
||||||
|
// (e.g., can't connect, lost leader)
|
||||||
|
// Treat Internal codes as if something failed, leaving the
|
||||||
|
// system in an inconsistent state, but retrying could make progress.
|
||||||
|
// (e.g., failed in middle of send, corrupted frame)
|
||||||
|
// TODO: are permanent Internal errors possible from grpc?
|
||||||
|
return code != codes.Unavailable && code != codes.Internal
|
||||||
|
}
|
||||||
|
|
||||||
|
func toErr(ctx context.Context, err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
err = rpctypes.Error(err)
|
||||||
|
if _, ok := err.(rpctypes.EtcdError); ok {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
code := grpc.Code(err)
|
||||||
|
switch code {
|
||||||
|
case codes.DeadlineExceeded:
|
||||||
|
fallthrough
|
||||||
|
case codes.Canceled:
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
err = ctx.Err()
|
||||||
|
}
|
||||||
|
case codes.Unavailable:
|
||||||
|
err = ErrNoAvailableEndpoints
|
||||||
|
case codes.FailedPrecondition:
|
||||||
|
err = grpc.ErrClientConnClosing
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func canceledByCaller(stopCtx context.Context, err error) bool {
|
||||||
|
if stopCtx.Err() == nil || err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return err == context.Canceled || err == context.DeadlineExceeded
|
||||||
|
}
|
100
vendor/github.com/coreos/etcd/clientv3/cluster.go
generated
vendored
Normal file
100
vendor/github.com/coreos/etcd/clientv3/cluster.go
generated
vendored
Normal file
|
@ -0,0 +1,100 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
Member pb.Member
|
||||||
|
MemberListResponse pb.MemberListResponse
|
||||||
|
MemberAddResponse pb.MemberAddResponse
|
||||||
|
MemberRemoveResponse pb.MemberRemoveResponse
|
||||||
|
MemberUpdateResponse pb.MemberUpdateResponse
|
||||||
|
)
|
||||||
|
|
||||||
|
// Cluster is the client API for etcd cluster-membership operations.
type Cluster interface {
	// MemberList lists the current cluster membership.
	MemberList(ctx context.Context) (*MemberListResponse, error)

	// MemberAdd adds a new member into the cluster.
	MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)

	// MemberRemove removes an existing member from the cluster.
	MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)

	// MemberUpdate updates the peer addresses of the member.
	MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
}

// cluster implements Cluster on top of the generated gRPC ClusterClient.
type cluster struct {
	remote pb.ClusterClient
}

// NewCluster returns a Cluster whose RPCs go through the client's retry wrapper.
func NewCluster(c *Client) Cluster {
	return &cluster{remote: RetryClusterClient(c)}
}

// NewClusterFromClusterClient wraps an existing gRPC ClusterClient as a
// Cluster, without adding the retry wrapper.
func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster {
	return &cluster{remote: remote}
}

// MemberAdd issues a single MemberAdd RPC. Unlike MemberUpdate and
// MemberList below, it is not retried on failure.
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
	r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
	resp, err := c.remote.MemberAdd(ctx, r)
	if err != nil {
		return nil, toErr(ctx, err)
	}
	return (*MemberAddResponse)(resp), nil
}

// MemberRemove issues a single MemberRemove RPC; it is not retried on failure.
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
	r := &pb.MemberRemoveRequest{ID: id}
	resp, err := c.remote.MemberRemove(ctx, r)
	if err != nil {
		return nil, toErr(ctx, err)
	}
	return (*MemberRemoveResponse)(resp), nil
}

func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
	// it is safe to retry on update.
	for {
		r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
		resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
		if err == nil {
			return (*MemberUpdateResponse)(resp), nil
		}
		// isHaltErr decides when the error is unrecoverable and the
		// retry loop must stop.
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}

func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
	// it is safe to retry on list.
	for {
		resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
		if err == nil {
			return (*MemberListResponse)(resp), nil
		}
		// isHaltErr decides when the error is unrecoverable and the
		// retry loop must stop.
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}
|
53
vendor/github.com/coreos/etcd/clientv3/compact_op.go
generated
vendored
Normal file
53
vendor/github.com/coreos/etcd/clientv3/compact_op.go
generated
vendored
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// CompactOp represents a compact operation.
type CompactOp struct {
	revision int64
	physical bool
}

// CompactOption configures compact operation.
type CompactOption func(*CompactOp)

// applyCompactOpts applies each option to op in order.
func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
	for _, opt := range opts {
		opt(op)
	}
}

// OpCompact wraps slice CompactOption to create a CompactOp.
func OpCompact(rev int64, opts ...CompactOption) CompactOp {
	ret := CompactOp{revision: rev}
	ret.applyCompactOpts(opts)
	return ret
}

// toRequest converts the operation into the wire-level protobuf request.
func (op CompactOp) toRequest() *pb.CompactionRequest {
	return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
}

// WithCompactPhysical makes compact RPC call wait until
// the compaction is physically applied to the local database
// such that compacted entries are totally removed from the
// backend database.
func WithCompactPhysical() CompactOption {
	return func(op *CompactOp) { op.physical = true }
}
|
110
vendor/github.com/coreos/etcd/clientv3/compare.go
generated
vendored
Normal file
110
vendor/github.com/coreos/etcd/clientv3/compare.go
generated
vendored
Normal file
|
@ -0,0 +1,110 @@
|
||||||
|
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// CompareTarget selects which key attribute a comparison inspects.
type CompareTarget int

// CompareResult selects the relation used by a comparison.
type CompareResult int

const (
	CompareVersion CompareTarget = iota
	CompareCreated
	CompareModified
	CompareValue
)

// Cmp is a transaction comparison; it aliases the protobuf Compare message.
type Cmp pb.Compare

// Compare fills in cmp's comparison operator (one of "=", "!=", ">", "<")
// and target value v, whose accepted type depends on cmp's Target.
// It panics on an unknown operator, target, or value type.
func Compare(cmp Cmp, result string, v interface{}) Cmp {
	var r pb.Compare_CompareResult

	switch result {
	case "=":
		r = pb.Compare_EQUAL
	case "!=":
		r = pb.Compare_NOT_EQUAL
	case ">":
		r = pb.Compare_GREATER
	case "<":
		r = pb.Compare_LESS
	default:
		panic("Unknown result op")
	}

	cmp.Result = r
	switch cmp.Target {
	case pb.Compare_VALUE:
		// value comparisons take a string; revisions/versions take int/int64
		val, ok := v.(string)
		if !ok {
			panic("bad compare value")
		}
		cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
	case pb.Compare_VERSION:
		cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
	case pb.Compare_CREATE:
		cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
	case pb.Compare_MOD:
		cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
	default:
		panic("Unknown compare type")
	}
	return cmp
}

// Value returns a comparison on key's value.
func Value(key string) Cmp {
	return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
}

// Version returns a comparison on key's version.
func Version(key string) Cmp {
	return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
}

// CreateRevision returns a comparison on key's creation revision.
func CreateRevision(key string) Cmp {
	return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
}

// ModRevision returns a comparison on key's last-modification revision.
func ModRevision(key string) Cmp {
	return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
}

// KeyBytes returns the byte slice holding with the comparison key.
func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }

// WithKeyBytes sets the byte slice for the comparison key.
func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }

// ValueBytes returns the byte slice holding the comparison value, if any.
func (cmp *Cmp) ValueBytes() []byte {
	if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
		return tu.Value
	}
	return nil
}

// WithValueBytes sets the byte slice for the comparison's value.
func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }

// mustInt64 converts an int or int64 to int64, panicking on any other type.
func mustInt64(val interface{}) int64 {
	if v, ok := val.(int64); ok {
		return v
	}
	if v, ok := val.(int); ok {
		return int64(v)
	}
	panic("bad value")
}
|
17
vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go
generated
vendored
Normal file
17
vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package concurrency implements concurrency operations on top of
|
||||||
|
// etcd such as distributed locks, barriers, and elections.
|
||||||
|
package concurrency
|
243
vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
generated
vendored
Normal file
243
vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
generated
vendored
Normal file
|
@ -0,0 +1,243 @@
|
||||||
|
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package concurrency

import (
	"errors"
	"fmt"

	v3 "github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/mvcc/mvccpb"
	"golang.org/x/net/context"
)

var (
	ErrElectionNotLeader = errors.New("election: not leader")
	ErrElectionNoLeader  = errors.New("election: no leader")
)

// Election implements a leader election backed by keys under a shared prefix.
type Election struct {
	session *Session

	keyPrefix string

	leaderKey     string
	leaderRev     int64
	leaderSession *Session
	hdr           *pb.ResponseHeader
}

// NewElection returns a new election on a given key prefix.
func NewElection(s *Session, pfx string) *Election {
	return &Election{session: s, keyPrefix: pfx + "/"}
}

// ResumeElection initializes an election with a known leader.
func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
	return &Election{
		session:       s,
		leaderKey:     leaderKey,
		leaderRev:     leaderRev,
		leaderSession: s,
	}
}

// Campaign puts a value as eligible for the election. It blocks until
// it is elected, an error occurs, or the context is cancelled.
func (e *Election) Campaign(ctx context.Context, val string) error {
	s := e.session
	client := e.session.Client()

	// candidate key is derived from the session's lease ID
	k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
	// create the key only if it does not exist yet; otherwise fetch it
	txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
	txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
	txn = txn.Else(v3.OpGet(k))
	resp, err := txn.Commit()
	if err != nil {
		return err
	}
	e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
	if !resp.Succeeded {
		// key already existed; reuse its creation revision and refresh
		// the stored value if it differs
		kv := resp.Responses[0].GetResponseRange().Kvs[0]
		e.leaderRev = kv.CreateRevision
		if string(kv.Value) != val {
			if err = e.Proclaim(ctx, val); err != nil {
				e.Resign(ctx)
				return err
			}
		}
	}

	// block until every candidate created before us is gone
	_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
	if err != nil {
		// clean up in case of context cancel
		select {
		case <-ctx.Done():
			e.Resign(client.Ctx())
		default:
			e.leaderSession = nil
		}
		return err
	}
	e.hdr = resp.Header

	return nil
}

// Proclaim lets the leader announce a new value without another election.
func (e *Election) Proclaim(ctx context.Context, val string) error {
	if e.leaderSession == nil {
		return ErrElectionNotLeader
	}
	client := e.session.Client()
	// guard against the leader key being recreated by someone else
	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
	txn := client.Txn(ctx).If(cmp)
	txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
	tresp, terr := txn.Commit()
	if terr != nil {
		return terr
	}
	if !tresp.Succeeded {
		e.leaderKey = ""
		return ErrElectionNotLeader
	}

	e.hdr = tresp.Header
	return nil
}

// Resign lets a leader start a new election.
func (e *Election) Resign(ctx context.Context) (err error) {
	if e.leaderSession == nil {
		return nil
	}
	client := e.session.Client()
	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
	resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
	if err == nil {
		e.hdr = resp.Header
	}
	e.leaderKey = ""
	e.leaderSession = nil
	return err
}

// Leader returns the leader value for the current election.
func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
	client := e.session.Client()
	resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
	if err != nil {
		return nil, err
	} else if len(resp.Kvs) == 0 {
		// no leader currently elected
		return nil, ErrElectionNoLeader
	}
	return resp, nil
}

// Observe returns a channel that reliably observes ordered leader proposals
// as GetResponse values on every current elected leader key. It will not
// necessarily fetch all historical leader updates, but will always post the
// most recent leader value.
//
// The channel closes when the context is canceled or the underlying watcher
// is otherwise disrupted.
func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
	retc := make(chan v3.GetResponse)
	go e.observe(ctx, retc)
	return retc
}

// observe is the worker behind Observe; it feeds leader updates into ch
// until the context is cancelled or a watch fails.
func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
	client := e.session.Client()

	defer close(ch)
	for {
		resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
		if err != nil {
			return
		}

		var kv *mvccpb.KeyValue
		var hdr *pb.ResponseHeader

		if len(resp.Kvs) == 0 {
			cctx, cancel := context.WithCancel(ctx)
			// wait for first key put on prefix
			opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
			wch := client.Watch(cctx, e.keyPrefix, opts...)
			for kv == nil {
				wr, ok := <-wch
				if !ok || wr.Err() != nil {
					cancel()
					return
				}
				// only accept PUTs; a DELETE will make observe() spin
				for _, ev := range wr.Events {
					if ev.Type == mvccpb.PUT {
						hdr, kv = &wr.Header, ev.Kv
						// may have multiple revs; hdr.rev = the last rev
						// set to kv's rev in case batch has multiple PUTs
						hdr.Revision = kv.ModRevision
						break
					}
				}
			}
			cancel()
		} else {
			hdr, kv = resp.Header, resp.Kvs[0]
		}

		select {
		case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
		case <-ctx.Done():
			return
		}

		// stream subsequent updates to the current leader key until it
		// is deleted, then loop back to find the next leader
		cctx, cancel := context.WithCancel(ctx)
		wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
		keyDeleted := false
		for !keyDeleted {
			wr, ok := <-wch
			if !ok {
				return
			}
			for _, ev := range wr.Events {
				if ev.Type == mvccpb.DELETE {
					keyDeleted = true
					break
				}
				resp.Header = &wr.Header
				resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
				select {
				case ch <- *resp:
				case <-cctx.Done():
					return
				}
			}
		}
		cancel()
	}
}

// Key returns the leader key if elected, empty string otherwise.
func (e *Election) Key() string { return e.leaderKey }

// Rev returns the leader key's creation revision, if elected.
func (e *Election) Rev() int64 { return e.leaderRev }

// Header is the response header from the last successful election proposal.
func (m *Election) Header() *pb.ResponseHeader { return m.hdr }
|
65
vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
generated
vendored
Normal file
65
vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
generated
vendored
Normal file
|
@ -0,0 +1,65 @@
|
||||||
|
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package concurrency

import (
	"fmt"

	v3 "github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/mvcc/mvccpb"
	"golang.org/x/net/context"
)

// waitDelete blocks until a DELETE event is observed on key (watching from
// revision rev), the context is cancelled, or the watch is lost.
func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wr v3.WatchResponse
	wch := client.Watch(cctx, key, v3.WithRev(rev))
	for wr = range wch {
		for _, ev := range wr.Events {
			if ev.Type == mvccpb.DELETE {
				return nil
			}
		}
	}
	// channel closed without a DELETE: report the watch error, a context
	// error, or a generic lost-watcher failure, in that order
	if err := wr.Err(); err != nil {
		return err
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	return fmt.Errorf("lost watcher waiting for delete")
}

// waitDeletes efficiently waits until all keys matching the prefix and no
// greater than the create revision are deleted.
func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
	getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
	for {
		resp, err := client.Get(ctx, pfx, getOpts...)
		if err != nil {
			return nil, err
		}
		if len(resp.Kvs) == 0 {
			// no matching keys remain
			return resp.Header, nil
		}
		// wait for the most recently created matching key to disappear,
		// then re-check the prefix
		lastKey := string(resp.Kvs[0].Key)
		if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
			return nil, err
		}
	}
}
|
110
vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go
generated
vendored
Normal file
110
vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go
generated
vendored
Normal file
|
@ -0,0 +1,110 @@
|
||||||
|
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package concurrency

import (
	"fmt"
	"sync"

	v3 "github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
)

// Mutex implements the sync Locker interface with etcd
type Mutex struct {
	s *Session

	pfx   string
	myKey string
	myRev int64
	hdr   *pb.ResponseHeader
}

// NewMutex returns a mutex whose waiters queue under pfx, bound to session s.
func NewMutex(s *Session, pfx string) *Mutex {
	return &Mutex{s, pfx + "/", "", -1, nil}
}

// Lock locks the mutex with a cancelable context. If the context is canceled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
	s := m.s
	client := m.s.Client()

	m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
	cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
	// put self in lock waiters via myKey; oldest waiter holds lock
	put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
	// reuse key in case this session already holds the lock
	get := v3.OpGet(m.myKey)
	resp, err := client.Txn(ctx).If(cmp).Then(put).Else(get).Commit()
	if err != nil {
		return err
	}
	m.myRev = resp.Header.Revision
	if !resp.Succeeded {
		m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
	}

	// wait for deletion revisions prior to myKey
	hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
	// release lock key if cancelled
	select {
	case <-ctx.Done():
		m.Unlock(client.Ctx())
	default:
		m.hdr = hdr
	}
	return werr
}

// Unlock releases the lock by deleting this mutex's waiter key.
func (m *Mutex) Unlock(ctx context.Context) error {
	client := m.s.Client()
	if _, err := client.Delete(ctx, m.myKey); err != nil {
		return err
	}
	m.myKey = "\x00"
	m.myRev = -1
	return nil
}

// IsOwner returns a comparison that succeeds in a txn only while this
// mutex still holds the lock.
func (m *Mutex) IsOwner() v3.Cmp {
	return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
}

// Key returns this mutex's waiter key.
func (m *Mutex) Key() string { return m.myKey }

// Header is the response header received from etcd on acquiring the lock.
func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }

// lockerMutex adapts Mutex to the sync.Locker interface; lock errors panic.
type lockerMutex struct{ *Mutex }

func (lm *lockerMutex) Lock() {
	client := lm.s.Client()
	if err := lm.Mutex.Lock(client.Ctx()); err != nil {
		panic(err)
	}
}
func (lm *lockerMutex) Unlock() {
	client := lm.s.Client()
	if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
		panic(err)
	}
}

// NewLocker creates a sync.Locker backed by an etcd mutex.
func NewLocker(s *Session, pfx string) sync.Locker {
	return &lockerMutex{NewMutex(s, pfx)}
}
|
140
vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
generated
vendored
Normal file
140
vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
generated
vendored
Normal file
|
@ -0,0 +1,140 @@
|
||||||
|
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package concurrency

import (
	"time"

	v3 "github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// default lease TTL, in seconds, when WithTTL is not given
const defaultSessionTTL = 60

// Session represents a lease kept alive for the lifetime of a client.
// Fault-tolerant applications may use sessions to reason about liveness.
type Session struct {
	client *v3.Client
	opts   *sessionOptions
	id     v3.LeaseID

	cancel context.CancelFunc
	donec  <-chan struct{}
}

// NewSession gets the leased session for a client.
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
	ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
	for _, opt := range opts {
		opt(ops)
	}

	// grant a fresh lease unless the caller supplied one via WithLease
	id := ops.leaseID
	if id == v3.NoLease {
		resp, err := client.Grant(ops.ctx, int64(ops.ttl))
		if err != nil {
			return nil, err
		}
		id = v3.LeaseID(resp.ID)
	}

	ctx, cancel := context.WithCancel(ops.ctx)
	keepAlive, err := client.KeepAlive(ctx, id)
	if err != nil || keepAlive == nil {
		return nil, err
	}

	donec := make(chan struct{})
	s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}

	// keep the lease alive until client error or cancelled context
	go func() {
		defer close(donec)
		for range keepAlive {
			// eat messages until keep alive channel closes
		}
	}()

	return s, nil
}

// Client is the etcd client that is attached to the session.
func (s *Session) Client() *v3.Client {
	return s.client
}

// Lease is the lease ID for keys bound to the session.
func (s *Session) Lease() v3.LeaseID { return s.id }

// Done returns a channel that closes when the lease is orphaned, expires, or
// is otherwise no longer being refreshed.
func (s *Session) Done() <-chan struct{} { return s.donec }

// Orphan ends the refresh for the session lease. This is useful
// in case the state of the client connection is indeterminate (revoke
// would fail) or when transferring lease ownership.
func (s *Session) Orphan() {
	s.cancel()
	<-s.donec
}

// Close orphans the session and revokes the session lease.
func (s *Session) Close() error {
	s.Orphan()
	// if revoke takes longer than the ttl, lease is expired anyway
	ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
	_, err := s.client.Revoke(ctx, s.id)
	cancel()
	return err
}

// sessionOptions holds the configurable parameters collected by SessionOption.
type sessionOptions struct {
	ttl     int
	leaseID v3.LeaseID
	ctx     context.Context
}

// SessionOption configures Session.
type SessionOption func(*sessionOptions)

// WithTTL configures the session's TTL in seconds.
// If TTL is <= 0, the default 60 seconds TTL will be used.
func WithTTL(ttl int) SessionOption {
	return func(so *sessionOptions) {
		if ttl > 0 {
			so.ttl = ttl
		}
	}
}

// WithLease specifies the existing leaseID to be used for the session.
// This is useful in process restart scenario, for example, to reclaim
// leadership from an election prior to restart.
func WithLease(leaseID v3.LeaseID) SessionOption {
	return func(so *sessionOptions) {
		so.leaseID = leaseID
	}
}

// WithContext assigns a context to the session instead of defaulting to
// using the client context. This is useful for canceling NewSession and
// Close operations immediately without having to close the client. If the
// context is canceled before Close() completes, the session's lease will be
// abandoned and left to expire instead of being revoked.
func WithContext(ctx context.Context) SessionOption {
	return func(so *sessionOptions) {
		so.ctx = ctx
	}
}
|
387
vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go
generated
vendored
Normal file
387
vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go
generated
vendored
Normal file
|
@ -0,0 +1,387 @@
|
||||||
|
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package concurrency

import (
	"math"

	v3 "github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// STM is an interface for software transactional memory.
type STM interface {
	// Get returns the value for a key and inserts the key in the txn's read set.
	// If Get fails, it aborts the transaction with an error, never returning.
	Get(key ...string) string
	// Put adds a value for a key to the write set.
	Put(key, val string, opts ...v3.OpOption)
	// Rev returns the revision of a key in the read set.
	Rev(key string) int64
	// Del deletes a key.
	Del(key string)

	// commit attempts to apply the txn's changes to the server.
	commit() *v3.TxnResponse
	// reset clears the txn's state so the attempt can be retried.
	reset()
}

// Isolation is an enumeration of transactional isolation levels which
// describes how transactions should interfere and conflict.
type Isolation int

const (
	// SerializableSnapshot provides serializable isolation and also checks
	// for write conflicts.
	SerializableSnapshot Isolation = iota
	// Serializable reads within the same transaction attempt return data
	// from the revision of the first read.
	Serializable
	// RepeatableReads reads within the same transaction attempt always
	// return the same data.
	RepeatableReads
	// ReadCommitted reads keys from any committed revision.
	ReadCommitted
)

// stmError safely passes STM errors through panic to the STM error channel.
type stmError struct{ err error }

// stmOptions holds the configurable parameters collected by stmOption.
type stmOptions struct {
	iso      Isolation
	ctx      context.Context
	prefetch []string
}

type stmOption func(*stmOptions)

// WithIsolation specifies the transaction isolation level.
func WithIsolation(lvl Isolation) stmOption {
	return func(so *stmOptions) { so.iso = lvl }
}

// WithAbortContext specifies the context for permanently aborting the transaction.
func WithAbortContext(ctx context.Context) stmOption {
	return func(so *stmOptions) { so.ctx = ctx }
}
|
||||||
|
|
||||||
|
// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
|
||||||
|
// If an STM transaction will unconditionally fetch a set of keys, prefetching
|
||||||
|
// those keys will save the round-trip cost from requesting each key one by one
|
||||||
|
// with Get().
|
||||||
|
func WithPrefetch(keys ...string) stmOption {
|
||||||
|
return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSTM initiates a new STM instance, using snapshot isolation by default.
|
||||||
|
func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
|
||||||
|
opts := &stmOptions{ctx: c.Ctx()}
|
||||||
|
for _, f := range so {
|
||||||
|
f(opts)
|
||||||
|
}
|
||||||
|
if len(opts.prefetch) != 0 {
|
||||||
|
f := apply
|
||||||
|
apply = func(s STM) error {
|
||||||
|
s.Get(opts.prefetch...)
|
||||||
|
return f(s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return runSTM(mkSTM(c, opts), apply)
|
||||||
|
}
|
||||||
|
|
||||||
|
func mkSTM(c *v3.Client, opts *stmOptions) STM {
|
||||||
|
switch opts.iso {
|
||||||
|
case SerializableSnapshot:
|
||||||
|
s := &stmSerializable{
|
||||||
|
stm: stm{client: c, ctx: opts.ctx},
|
||||||
|
prefetch: make(map[string]*v3.GetResponse),
|
||||||
|
}
|
||||||
|
s.conflicts = func() []v3.Cmp {
|
||||||
|
return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
case Serializable:
|
||||||
|
s := &stmSerializable{
|
||||||
|
stm: stm{client: c, ctx: opts.ctx},
|
||||||
|
prefetch: make(map[string]*v3.GetResponse),
|
||||||
|
}
|
||||||
|
s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
|
||||||
|
return s
|
||||||
|
case RepeatableReads:
|
||||||
|
s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
|
||||||
|
s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
|
||||||
|
return s
|
||||||
|
case ReadCommitted:
|
||||||
|
s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
|
||||||
|
s.conflicts = func() []v3.Cmp { return nil }
|
||||||
|
return s
|
||||||
|
default:
|
||||||
|
panic("unsupported stm")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type stmResponse struct {
|
||||||
|
resp *v3.TxnResponse
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
|
||||||
|
outc := make(chan stmResponse, 1)
|
||||||
|
go func() {
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
e, ok := r.(stmError)
|
||||||
|
if !ok {
|
||||||
|
// client apply panicked
|
||||||
|
panic(r)
|
||||||
|
}
|
||||||
|
outc <- stmResponse{nil, e.err}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
var out stmResponse
|
||||||
|
for {
|
||||||
|
s.reset()
|
||||||
|
if out.err = apply(s); out.err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if out.resp = s.commit(); out.resp != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
outc <- out
|
||||||
|
}()
|
||||||
|
r := <-outc
|
||||||
|
return r.resp, r.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// stm implements repeatable-read software transactional memory over etcd
|
||||||
|
type stm struct {
|
||||||
|
client *v3.Client
|
||||||
|
ctx context.Context
|
||||||
|
// rset holds read key values and revisions
|
||||||
|
rset readSet
|
||||||
|
// wset holds overwritten keys and their values
|
||||||
|
wset writeSet
|
||||||
|
// getOpts are the opts used for gets
|
||||||
|
getOpts []v3.OpOption
|
||||||
|
// conflicts computes the current conflicts on the txn
|
||||||
|
conflicts func() []v3.Cmp
|
||||||
|
}
|
||||||
|
|
||||||
|
type stmPut struct {
|
||||||
|
val string
|
||||||
|
op v3.Op
|
||||||
|
}
|
||||||
|
|
||||||
|
type readSet map[string]*v3.GetResponse
|
||||||
|
|
||||||
|
func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
|
||||||
|
for i, resp := range txnresp.Responses {
|
||||||
|
rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// first returns the store revision from the first fetch
|
||||||
|
func (rs readSet) first() int64 {
|
||||||
|
ret := int64(math.MaxInt64 - 1)
|
||||||
|
for _, resp := range rs {
|
||||||
|
if rev := resp.Header.Revision; rev < ret {
|
||||||
|
ret = rev
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// cmps guards the txn from updates to read set
|
||||||
|
func (rs readSet) cmps() []v3.Cmp {
|
||||||
|
cmps := make([]v3.Cmp, 0, len(rs))
|
||||||
|
for k, rk := range rs {
|
||||||
|
cmps = append(cmps, isKeyCurrent(k, rk))
|
||||||
|
}
|
||||||
|
return cmps
|
||||||
|
}
|
||||||
|
|
||||||
|
type writeSet map[string]stmPut
|
||||||
|
|
||||||
|
func (ws writeSet) get(keys ...string) *stmPut {
|
||||||
|
for _, key := range keys {
|
||||||
|
if wv, ok := ws[key]; ok {
|
||||||
|
return &wv
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// cmps returns a cmp list testing no writes have happened past rev
|
||||||
|
func (ws writeSet) cmps(rev int64) []v3.Cmp {
|
||||||
|
cmps := make([]v3.Cmp, 0, len(ws))
|
||||||
|
for key := range ws {
|
||||||
|
cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
|
||||||
|
}
|
||||||
|
return cmps
|
||||||
|
}
|
||||||
|
|
||||||
|
// puts is the list of ops for all pending writes
|
||||||
|
func (ws writeSet) puts() []v3.Op {
|
||||||
|
puts := make([]v3.Op, 0, len(ws))
|
||||||
|
for _, v := range ws {
|
||||||
|
puts = append(puts, v.op)
|
||||||
|
}
|
||||||
|
return puts
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stm) Get(keys ...string) string {
|
||||||
|
if wv := s.wset.get(keys...); wv != nil {
|
||||||
|
return wv.val
|
||||||
|
}
|
||||||
|
return respToValue(s.fetch(keys...))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stm) Put(key, val string, opts ...v3.OpOption) {
|
||||||
|
s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
|
||||||
|
|
||||||
|
func (s *stm) Rev(key string) int64 {
|
||||||
|
if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
|
||||||
|
return resp.Kvs[0].ModRevision
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stm) commit() *v3.TxnResponse {
|
||||||
|
txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
|
||||||
|
if err != nil {
|
||||||
|
panic(stmError{err})
|
||||||
|
}
|
||||||
|
if txnresp.Succeeded {
|
||||||
|
return txnresp
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stm) fetch(keys ...string) *v3.GetResponse {
|
||||||
|
if len(keys) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
ops := make([]v3.Op, len(keys))
|
||||||
|
for i, key := range keys {
|
||||||
|
if resp, ok := s.rset[key]; ok {
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
ops[i] = v3.OpGet(key, s.getOpts...)
|
||||||
|
}
|
||||||
|
txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
|
||||||
|
if err != nil {
|
||||||
|
panic(stmError{err})
|
||||||
|
}
|
||||||
|
s.rset.add(keys, txnresp)
|
||||||
|
return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stm) reset() {
|
||||||
|
s.rset = make(map[string]*v3.GetResponse)
|
||||||
|
s.wset = make(map[string]stmPut)
|
||||||
|
}
|
||||||
|
|
||||||
|
type stmSerializable struct {
|
||||||
|
stm
|
||||||
|
prefetch map[string]*v3.GetResponse
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stmSerializable) Get(keys ...string) string {
|
||||||
|
if wv := s.wset.get(keys...); wv != nil {
|
||||||
|
return wv.val
|
||||||
|
}
|
||||||
|
firstRead := len(s.rset) == 0
|
||||||
|
for _, key := range keys {
|
||||||
|
if resp, ok := s.prefetch[key]; ok {
|
||||||
|
delete(s.prefetch, key)
|
||||||
|
s.rset[key] = resp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
resp := s.stm.fetch(keys...)
|
||||||
|
if firstRead {
|
||||||
|
// txn's base revision is defined by the first read
|
||||||
|
s.getOpts = []v3.OpOption{
|
||||||
|
v3.WithRev(resp.Header.Revision),
|
||||||
|
v3.WithSerializable(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return respToValue(resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stmSerializable) Rev(key string) int64 {
|
||||||
|
s.Get(key)
|
||||||
|
return s.stm.Rev(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stmSerializable) gets() ([]string, []v3.Op) {
|
||||||
|
keys := make([]string, 0, len(s.rset))
|
||||||
|
ops := make([]v3.Op, 0, len(s.rset))
|
||||||
|
for k := range s.rset {
|
||||||
|
keys = append(keys, k)
|
||||||
|
ops = append(ops, v3.OpGet(k))
|
||||||
|
}
|
||||||
|
return keys, ops
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stmSerializable) commit() *v3.TxnResponse {
|
||||||
|
keys, getops := s.gets()
|
||||||
|
txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
|
||||||
|
// use Else to prefetch keys in case of conflict to save a round trip
|
||||||
|
txnresp, err := txn.Else(getops...).Commit()
|
||||||
|
if err != nil {
|
||||||
|
panic(stmError{err})
|
||||||
|
}
|
||||||
|
if txnresp.Succeeded {
|
||||||
|
return txnresp
|
||||||
|
}
|
||||||
|
// load prefetch with Else data
|
||||||
|
s.rset.add(keys, txnresp)
|
||||||
|
s.prefetch = s.rset
|
||||||
|
s.getOpts = nil
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
|
||||||
|
if len(r.Kvs) != 0 {
|
||||||
|
return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
|
||||||
|
}
|
||||||
|
return v3.Compare(v3.ModRevision(k), "=", 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func respToValue(resp *v3.GetResponse) string {
|
||||||
|
if resp == nil || len(resp.Kvs) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return string(resp.Kvs[0].Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSTMRepeatable is deprecated.
|
||||||
|
func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
|
||||||
|
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSTMSerializable is deprecated.
|
||||||
|
func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
|
||||||
|
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSTMReadCommitted is deprecated.
|
||||||
|
func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
|
||||||
|
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
|
||||||
|
}
|
54
vendor/github.com/coreos/etcd/clientv3/config.go
generated
vendored
Normal file
54
vendor/github.com/coreos/etcd/clientv3/config.go
generated
vendored
Normal file
|
@ -0,0 +1,54 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
// Endpoints is a list of URLs.
|
||||||
|
Endpoints []string `json:"endpoints"`
|
||||||
|
|
||||||
|
// AutoSyncInterval is the interval to update endpoints with its latest members.
|
||||||
|
// 0 disables auto-sync. By default auto-sync is disabled.
|
||||||
|
AutoSyncInterval time.Duration `json:"auto-sync-interval"`
|
||||||
|
|
||||||
|
// DialTimeout is the timeout for failing to establish a connection.
|
||||||
|
DialTimeout time.Duration `json:"dial-timeout"`
|
||||||
|
|
||||||
|
// TLS holds the client secure credentials, if any.
|
||||||
|
TLS *tls.Config
|
||||||
|
|
||||||
|
// Username is a username for authentication.
|
||||||
|
Username string `json:"username"`
|
||||||
|
|
||||||
|
// Password is a password for authentication.
|
||||||
|
Password string `json:"password"`
|
||||||
|
|
||||||
|
// RejectOldCluster when set will refuse to create a client against an outdated cluster.
|
||||||
|
RejectOldCluster bool `json:"reject-old-cluster"`
|
||||||
|
|
||||||
|
// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
|
||||||
|
DialOptions []grpc.DialOption
|
||||||
|
|
||||||
|
// Context is the default client context; it can be used to cancel grpc dial out and
|
||||||
|
// other operations that do not have an explicit context.
|
||||||
|
Context context.Context
|
||||||
|
}
|
64
vendor/github.com/coreos/etcd/clientv3/doc.go
generated
vendored
Normal file
64
vendor/github.com/coreos/etcd/clientv3/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,64 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package clientv3 implements the official Go etcd client for v3.
|
||||||
|
//
|
||||||
|
// Create client using `clientv3.New`:
|
||||||
|
//
|
||||||
|
// cli, err := clientv3.New(clientv3.Config{
|
||||||
|
// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
|
||||||
|
// DialTimeout: 5 * time.Second,
|
||||||
|
// })
|
||||||
|
// if err != nil {
|
||||||
|
// // handle error!
|
||||||
|
// }
|
||||||
|
// defer cli.Close()
|
||||||
|
//
|
||||||
|
// Make sure to close the client after using it. If the client is not closed, the
|
||||||
|
// connection will have leaky goroutines.
|
||||||
|
//
|
||||||
|
// To specify client request timeout, pass context.WithTimeout to APIs:
|
||||||
|
//
|
||||||
|
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||||
|
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
|
||||||
|
// cancel()
|
||||||
|
// if err != nil {
|
||||||
|
// // handle error!
|
||||||
|
// }
|
||||||
|
// // use the response
|
||||||
|
//
|
||||||
|
// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
|
||||||
|
// Clients are safe for concurrent use by multiple goroutines.
|
||||||
|
//
|
||||||
|
// etcd client returns 2 types of errors:
|
||||||
|
//
|
||||||
|
// 1. context error: canceled or deadline exceeded.
|
||||||
|
// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
|
||||||
|
//
|
||||||
|
// Here is the example code to handle client errors:
|
||||||
|
//
|
||||||
|
// resp, err := kvc.Put(ctx, "", "")
|
||||||
|
// if err != nil {
|
||||||
|
// if err == context.Canceled {
|
||||||
|
// // ctx is canceled by another routine
|
||||||
|
// } else if err == context.DeadlineExceeded {
|
||||||
|
// // ctx is attached with a deadline and it exceeded
|
||||||
|
// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
|
||||||
|
// // process (verr.Errors)
|
||||||
|
// } else {
|
||||||
|
// // bad cluster endpoints, which are not etcd servers
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
package clientv3
|
162
vendor/github.com/coreos/etcd/clientv3/kv.go
generated
vendored
Normal file
162
vendor/github.com/coreos/etcd/clientv3/kv.go
generated
vendored
Normal file
|
@ -0,0 +1,162 @@
|
||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
CompactResponse pb.CompactionResponse
|
||||||
|
PutResponse pb.PutResponse
|
||||||
|
GetResponse pb.RangeResponse
|
||||||
|
DeleteResponse pb.DeleteRangeResponse
|
||||||
|
TxnResponse pb.TxnResponse
|
||||||
|
)
|
||||||
|
|
||||||
|
type KV interface {
|
||||||
|
// Put puts a key-value pair into etcd.
|
||||||
|
// Note that key,value can be plain bytes array and string is
|
||||||
|
// an immutable representation of that bytes array.
|
||||||
|
// To get a string of bytes, do string([]byte{0x10, 0x20}).
|
||||||
|
Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
|
||||||
|
|
||||||
|
// Get retrieves keys.
|
||||||
|
// By default, Get will return the value for "key", if any.
|
||||||
|
// When passed WithRange(end), Get will return the keys in the range [key, end).
|
||||||
|
// When passed WithFromKey(), Get returns keys greater than or equal to key.
|
||||||
|
// When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
|
||||||
|
// if the required revision is compacted, the request will fail with ErrCompacted .
|
||||||
|
// When passed WithLimit(limit), the number of returned keys is bounded by limit.
|
||||||
|
// When passed WithSort(), the keys will be sorted.
|
||||||
|
Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
|
||||||
|
|
||||||
|
// Delete deletes a key, or optionally using WithRange(end), [key, end).
|
||||||
|
Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
|
||||||
|
|
||||||
|
// Compact compacts etcd KV history before the given rev.
|
||||||
|
Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
|
||||||
|
|
||||||
|
// Do applies a single Op on KV without a transaction.
|
||||||
|
// Do is useful when creating arbitrary operations to be issued at a
|
||||||
|
// later time; the user can range over the operations, calling Do to
|
||||||
|
// execute them. Get/Put/Delete, on the other hand, are best suited
|
||||||
|
// for when the operation should be issued at the time of declaration.
|
||||||
|
Do(ctx context.Context, op Op) (OpResponse, error)
|
||||||
|
|
||||||
|
// Txn creates a transaction.
|
||||||
|
Txn(ctx context.Context) Txn
|
||||||
|
}
|
||||||
|
|
||||||
|
type OpResponse struct {
|
||||||
|
put *PutResponse
|
||||||
|
get *GetResponse
|
||||||
|
del *DeleteResponse
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op OpResponse) Put() *PutResponse { return op.put }
|
||||||
|
func (op OpResponse) Get() *GetResponse { return op.get }
|
||||||
|
func (op OpResponse) Del() *DeleteResponse { return op.del }
|
||||||
|
|
||||||
|
type kv struct {
|
||||||
|
remote pb.KVClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewKV(c *Client) KV {
|
||||||
|
return &kv{remote: RetryKVClient(c)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewKVFromKVClient(remote pb.KVClient) KV {
|
||||||
|
return &kv{remote: remote}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
|
||||||
|
r, err := kv.Do(ctx, OpPut(key, val, opts...))
|
||||||
|
return r.put, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
|
||||||
|
r, err := kv.Do(ctx, OpGet(key, opts...))
|
||||||
|
return r.get, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
|
||||||
|
r, err := kv.Do(ctx, OpDelete(key, opts...))
|
||||||
|
return r.del, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
|
||||||
|
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest())
|
||||||
|
if err != nil {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
return (*CompactResponse)(resp), err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (kv *kv) Txn(ctx context.Context) Txn {
|
||||||
|
return &txn{
|
||||||
|
kv: kv,
|
||||||
|
ctx: ctx,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
|
||||||
|
for {
|
||||||
|
resp, err := kv.do(ctx, op)
|
||||||
|
if err == nil {
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if isHaltErr(ctx, err) {
|
||||||
|
return resp, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
// do not retry on modifications
|
||||||
|
if op.isWrite() {
|
||||||
|
return resp, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
|
||||||
|
var err error
|
||||||
|
switch op.t {
|
||||||
|
// TODO: handle other ops
|
||||||
|
case tRange:
|
||||||
|
var resp *pb.RangeResponse
|
||||||
|
resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
|
||||||
|
if err == nil {
|
||||||
|
return OpResponse{get: (*GetResponse)(resp)}, nil
|
||||||
|
}
|
||||||
|
case tPut:
|
||||||
|
var resp *pb.PutResponse
|
||||||
|
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
|
||||||
|
resp, err = kv.remote.Put(ctx, r)
|
||||||
|
if err == nil {
|
||||||
|
return OpResponse{put: (*PutResponse)(resp)}, nil
|
||||||
|
}
|
||||||
|
case tDeleteRange:
|
||||||
|
var resp *pb.DeleteRangeResponse
|
||||||
|
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
|
||||||
|
resp, err = kv.remote.DeleteRange(ctx, r)
|
||||||
|
if err == nil {
|
||||||
|
return OpResponse{del: (*DeleteResponse)(resp)}, nil
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
panic("Unknown op")
|
||||||
|
}
|
||||||
|
return OpResponse{}, err
|
||||||
|
}
|
547
vendor/github.com/coreos/etcd/clientv3/lease.go
generated
vendored
Normal file
547
vendor/github.com/coreos/etcd/clientv3/lease.go
generated
vendored
Normal file
|
@ -0,0 +1,547 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
LeaseRevokeResponse pb.LeaseRevokeResponse
|
||||||
|
LeaseID int64
|
||||||
|
)
|
||||||
|
|
||||||
|
// LeaseGrantResponse is used to convert the protobuf grant response.
|
||||||
|
type LeaseGrantResponse struct {
|
||||||
|
*pb.ResponseHeader
|
||||||
|
ID LeaseID
|
||||||
|
TTL int64
|
||||||
|
Error string
|
||||||
|
}
|
||||||
|
|
||||||
|
// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
|
||||||
|
type LeaseKeepAliveResponse struct {
|
||||||
|
*pb.ResponseHeader
|
||||||
|
ID LeaseID
|
||||||
|
TTL int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
|
||||||
|
type LeaseTimeToLiveResponse struct {
|
||||||
|
*pb.ResponseHeader
|
||||||
|
ID LeaseID `json:"id"`
|
||||||
|
|
||||||
|
// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
|
||||||
|
TTL int64 `json:"ttl"`
|
||||||
|
|
||||||
|
// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
|
||||||
|
GrantedTTL int64 `json:"granted-ttl"`
|
||||||
|
|
||||||
|
// Keys is the list of keys attached to this lease.
|
||||||
|
Keys [][]byte `json:"keys"`
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// defaultTTL is the assumed lease TTL used for the first keepalive
|
||||||
|
// deadline before the actual TTL is known to the client.
|
||||||
|
defaultTTL = 5 * time.Second
|
||||||
|
// a small buffer to store unsent lease responses.
|
||||||
|
leaseResponseChSize = 16
|
||||||
|
// NoLease is a lease ID for the absence of a lease.
|
||||||
|
NoLease LeaseID = 0
|
||||||
|
|
||||||
|
// retryConnWait is how long to wait before retrying request due to an error
|
||||||
|
retryConnWait = 500 * time.Millisecond
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
|
||||||
|
//
|
||||||
|
// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
|
||||||
|
type ErrKeepAliveHalted struct {
|
||||||
|
Reason error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e ErrKeepAliveHalted) Error() string {
|
||||||
|
s := "etcdclient: leases keep alive halted"
|
||||||
|
if e.Reason != nil {
|
||||||
|
s += ": " + e.Reason.Error()
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
type Lease interface {
|
||||||
|
// Grant creates a new lease.
|
||||||
|
Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
|
||||||
|
|
||||||
|
// Revoke revokes the given lease.
|
||||||
|
Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
|
||||||
|
|
||||||
|
// TimeToLive retrieves the lease information of the given lease ID.
|
||||||
|
TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
|
||||||
|
|
||||||
|
// KeepAlive keeps the given lease alive forever.
|
||||||
|
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
|
||||||
|
|
||||||
|
// KeepAliveOnce renews the lease once. In most of the cases, Keepalive
|
||||||
|
// should be used instead of KeepAliveOnce.
|
||||||
|
KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
|
||||||
|
|
||||||
|
// Close releases all resources Lease keeps for efficient communication
|
||||||
|
// with the etcd server.
|
||||||
|
Close() error
|
||||||
|
}
|
||||||
|
|
||||||
|
type lessor struct {
|
||||||
|
mu sync.Mutex // guards all fields
|
||||||
|
|
||||||
|
// donec is closed and loopErr is set when recvKeepAliveLoop stops
|
||||||
|
donec chan struct{}
|
||||||
|
loopErr error
|
||||||
|
|
||||||
|
remote pb.LeaseClient
|
||||||
|
|
||||||
|
stream pb.Lease_LeaseKeepAliveClient
|
||||||
|
streamCancel context.CancelFunc
|
||||||
|
|
||||||
|
stopCtx context.Context
|
||||||
|
stopCancel context.CancelFunc
|
||||||
|
|
||||||
|
keepAlives map[LeaseID]*keepAlive
|
||||||
|
|
||||||
|
// firstKeepAliveTimeout is the timeout for the first keepalive request
|
||||||
|
// before the actual TTL is known to the lease client
|
||||||
|
firstKeepAliveTimeout time.Duration
|
||||||
|
|
||||||
|
// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
|
||||||
|
firstKeepAliveOnce sync.Once
|
||||||
|
}
|
||||||
|
|
||||||
|
// keepAlive multiplexes a keepalive for a lease over multiple channels
|
||||||
|
type keepAlive struct {
|
||||||
|
chs []chan<- *LeaseKeepAliveResponse
|
||||||
|
ctxs []context.Context
|
||||||
|
// deadline is the time the keep alive channels close if no response
|
||||||
|
deadline time.Time
|
||||||
|
// nextKeepAlive is when to send the next keep alive message
|
||||||
|
nextKeepAlive time.Time
|
||||||
|
// donec is closed on lease revoke, expiration, or cancel.
|
||||||
|
donec chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewLease(c *Client) Lease {
|
||||||
|
return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease {
|
||||||
|
l := &lessor{
|
||||||
|
donec: make(chan struct{}),
|
||||||
|
keepAlives: make(map[LeaseID]*keepAlive),
|
||||||
|
remote: remote,
|
||||||
|
firstKeepAliveTimeout: keepAliveTimeout,
|
||||||
|
}
|
||||||
|
if l.firstKeepAliveTimeout == time.Second {
|
||||||
|
l.firstKeepAliveTimeout = defaultTTL
|
||||||
|
}
|
||||||
|
reqLeaderCtx := WithRequireLeader(context.Background())
|
||||||
|
l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
|
||||||
|
for {
|
||||||
|
r := &pb.LeaseGrantRequest{TTL: ttl}
|
||||||
|
resp, err := l.remote.LeaseGrant(ctx, r)
|
||||||
|
if err == nil {
|
||||||
|
gresp := &LeaseGrantResponse{
|
||||||
|
ResponseHeader: resp.GetHeader(),
|
||||||
|
ID: LeaseID(resp.ID),
|
||||||
|
TTL: resp.TTL,
|
||||||
|
Error: resp.Error,
|
||||||
|
}
|
||||||
|
return gresp, nil
|
||||||
|
}
|
||||||
|
if isHaltErr(ctx, err) {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
|
||||||
|
for {
|
||||||
|
r := &pb.LeaseRevokeRequest{ID: int64(id)}
|
||||||
|
resp, err := l.remote.LeaseRevoke(ctx, r)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
return (*LeaseRevokeResponse)(resp), nil
|
||||||
|
}
|
||||||
|
if isHaltErr(ctx, err) {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
|
||||||
|
for {
|
||||||
|
r := toLeaseTimeToLiveRequest(id, opts...)
|
||||||
|
resp, err := l.remote.LeaseTimeToLive(ctx, r, grpc.FailFast(false))
|
||||||
|
if err == nil {
|
||||||
|
gresp := &LeaseTimeToLiveResponse{
|
||||||
|
ResponseHeader: resp.GetHeader(),
|
||||||
|
ID: LeaseID(resp.ID),
|
||||||
|
TTL: resp.TTL,
|
||||||
|
GrantedTTL: resp.GrantedTTL,
|
||||||
|
Keys: resp.Keys,
|
||||||
|
}
|
||||||
|
return gresp, nil
|
||||||
|
}
|
||||||
|
if isHaltErr(ctx, err) {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
|
||||||
|
ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
|
||||||
|
|
||||||
|
l.mu.Lock()
|
||||||
|
// ensure that recvKeepAliveLoop is still running
|
||||||
|
select {
|
||||||
|
case <-l.donec:
|
||||||
|
err := l.loopErr
|
||||||
|
l.mu.Unlock()
|
||||||
|
close(ch)
|
||||||
|
return ch, ErrKeepAliveHalted{Reason: err}
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
ka, ok := l.keepAlives[id]
|
||||||
|
if !ok {
|
||||||
|
// create fresh keep alive
|
||||||
|
ka = &keepAlive{
|
||||||
|
chs: []chan<- *LeaseKeepAliveResponse{ch},
|
||||||
|
ctxs: []context.Context{ctx},
|
||||||
|
deadline: time.Now().Add(l.firstKeepAliveTimeout),
|
||||||
|
nextKeepAlive: time.Now(),
|
||||||
|
donec: make(chan struct{}),
|
||||||
|
}
|
||||||
|
l.keepAlives[id] = ka
|
||||||
|
} else {
|
||||||
|
// add channel and context to existing keep alive
|
||||||
|
ka.ctxs = append(ka.ctxs, ctx)
|
||||||
|
ka.chs = append(ka.chs, ch)
|
||||||
|
}
|
||||||
|
l.mu.Unlock()
|
||||||
|
|
||||||
|
go l.keepAliveCtxCloser(id, ctx, ka.donec)
|
||||||
|
l.firstKeepAliveOnce.Do(func() {
|
||||||
|
go l.recvKeepAliveLoop()
|
||||||
|
go l.deadlineLoop()
|
||||||
|
})
|
||||||
|
|
||||||
|
return ch, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
|
||||||
|
for {
|
||||||
|
resp, err := l.keepAliveOnce(ctx, id)
|
||||||
|
if err == nil {
|
||||||
|
if resp.TTL <= 0 {
|
||||||
|
err = rpctypes.ErrLeaseNotFound
|
||||||
|
}
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
if isHaltErr(ctx, err) {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lessor) Close() error {
|
||||||
|
l.stopCancel()
|
||||||
|
// close for synchronous teardown if stream goroutines never launched
|
||||||
|
l.firstKeepAliveOnce.Do(func() { close(l.donec) })
|
||||||
|
<-l.donec
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
|
||||||
|
select {
|
||||||
|
case <-donec:
|
||||||
|
return
|
||||||
|
case <-l.donec:
|
||||||
|
return
|
||||||
|
case <-ctx.Done():
|
||||||
|
}
|
||||||
|
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
|
||||||
|
ka, ok := l.keepAlives[id]
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// close channel and remove context if still associated with keep alive
|
||||||
|
for i, c := range ka.ctxs {
|
||||||
|
if c == ctx {
|
||||||
|
close(ka.chs[i])
|
||||||
|
ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
|
||||||
|
ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// remove if no one more listeners
|
||||||
|
if len(ka.chs) == 0 {
|
||||||
|
delete(l.keepAlives, id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// closeRequireLeader scans all keep alives for ctxs that have require leader
|
||||||
|
// and closes the associated channels.
|
||||||
|
func (l *lessor) closeRequireLeader() {
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
for _, ka := range l.keepAlives {
|
||||||
|
reqIdxs := 0
|
||||||
|
// find all required leader channels, close, mark as nil
|
||||||
|
for i, ctx := range ka.ctxs {
|
||||||
|
md, ok := metadata.FromContext(ctx)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ks := md[rpctypes.MetadataRequireLeaderKey]
|
||||||
|
if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
close(ka.chs[i])
|
||||||
|
ka.chs[i] = nil
|
||||||
|
reqIdxs++
|
||||||
|
}
|
||||||
|
if reqIdxs == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// remove all channels that required a leader from keepalive
|
||||||
|
newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
|
||||||
|
newCtxs := make([]context.Context, len(newChs))
|
||||||
|
newIdx := 0
|
||||||
|
for i := range ka.chs {
|
||||||
|
if ka.chs[i] == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx]
|
||||||
|
newIdx++
|
||||||
|
}
|
||||||
|
ka.chs, ka.ctxs = newChs, newCtxs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
|
||||||
|
cctx, cancel := context.WithCancel(ctx)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
|
||||||
|
if err != nil {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
|
||||||
|
if err != nil {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, rerr := stream.Recv()
|
||||||
|
if rerr != nil {
|
||||||
|
return nil, toErr(ctx, rerr)
|
||||||
|
}
|
||||||
|
|
||||||
|
karesp := &LeaseKeepAliveResponse{
|
||||||
|
ResponseHeader: resp.GetHeader(),
|
||||||
|
ID: LeaseID(resp.ID),
|
||||||
|
TTL: resp.TTL,
|
||||||
|
}
|
||||||
|
return karesp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lessor) recvKeepAliveLoop() (gerr error) {
|
||||||
|
defer func() {
|
||||||
|
l.mu.Lock()
|
||||||
|
close(l.donec)
|
||||||
|
l.loopErr = gerr
|
||||||
|
for _, ka := range l.keepAlives {
|
||||||
|
ka.Close()
|
||||||
|
}
|
||||||
|
l.keepAlives = make(map[LeaseID]*keepAlive)
|
||||||
|
l.mu.Unlock()
|
||||||
|
}()
|
||||||
|
|
||||||
|
for {
|
||||||
|
stream, err := l.resetRecv()
|
||||||
|
if err != nil {
|
||||||
|
if canceledByCaller(l.stopCtx, err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for {
|
||||||
|
resp, err := stream.Recv()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if canceledByCaller(l.stopCtx, err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
|
||||||
|
l.closeRequireLeader()
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
l.recvKeepAlive(resp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(retryConnWait):
|
||||||
|
continue
|
||||||
|
case <-l.stopCtx.Done():
|
||||||
|
return l.stopCtx.Err()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
|
||||||
|
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
|
||||||
|
sctx, cancel := context.WithCancel(l.stopCtx)
|
||||||
|
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
|
||||||
|
if err != nil {
|
||||||
|
cancel()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
if l.stream != nil && l.streamCancel != nil {
|
||||||
|
l.streamCancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
l.streamCancel = cancel
|
||||||
|
l.stream = stream
|
||||||
|
|
||||||
|
go l.sendKeepAliveLoop(stream)
|
||||||
|
return stream, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
|
||||||
|
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
|
||||||
|
karesp := &LeaseKeepAliveResponse{
|
||||||
|
ResponseHeader: resp.GetHeader(),
|
||||||
|
ID: LeaseID(resp.ID),
|
||||||
|
TTL: resp.TTL,
|
||||||
|
}
|
||||||
|
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
|
||||||
|
ka, ok := l.keepAlives[karesp.ID]
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if karesp.TTL <= 0 {
|
||||||
|
// lease expired; close all keep alive channels
|
||||||
|
delete(l.keepAlives, karesp.ID)
|
||||||
|
ka.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// send update to all channels
|
||||||
|
nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
|
||||||
|
ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
|
||||||
|
for _, ch := range ka.chs {
|
||||||
|
select {
|
||||||
|
case ch <- karesp:
|
||||||
|
ka.nextKeepAlive = nextKeepAlive
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// deadlineLoop reaps any keep alive channels that have not received a response
|
||||||
|
// within the lease TTL
|
||||||
|
func (l *lessor) deadlineLoop() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
case <-l.donec:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
now := time.Now()
|
||||||
|
l.mu.Lock()
|
||||||
|
for id, ka := range l.keepAlives {
|
||||||
|
if ka.deadline.Before(now) {
|
||||||
|
// waited too long for response; lease may be expired
|
||||||
|
ka.Close()
|
||||||
|
delete(l.keepAlives, id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
l.mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
|
||||||
|
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
|
||||||
|
for {
|
||||||
|
var tosend []LeaseID
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
l.mu.Lock()
|
||||||
|
for id, ka := range l.keepAlives {
|
||||||
|
if ka.nextKeepAlive.Before(now) {
|
||||||
|
tosend = append(tosend, id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
l.mu.Unlock()
|
||||||
|
|
||||||
|
for _, id := range tosend {
|
||||||
|
r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
|
||||||
|
if err := stream.Send(r); err != nil {
|
||||||
|
// TODO do something with this error?
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(500 * time.Millisecond):
|
||||||
|
case <-stream.Context().Done():
|
||||||
|
return
|
||||||
|
case <-l.donec:
|
||||||
|
return
|
||||||
|
case <-l.stopCtx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ka *keepAlive) Close() {
|
||||||
|
close(ka.donec)
|
||||||
|
for _, ch := range ka.chs {
|
||||||
|
close(ch)
|
||||||
|
}
|
||||||
|
}
|
82
vendor/github.com/coreos/etcd/clientv3/logger.go
generated
vendored
Normal file
82
vendor/github.com/coreos/etcd/clientv3/logger.go
generated
vendored
Normal file
|
@ -0,0 +1,82 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Logger is the logger used by client library.
|
||||||
|
// It implements grpclog.Logger interface.
|
||||||
|
type Logger grpclog.Logger
|
||||||
|
|
||||||
|
var (
|
||||||
|
logger settableLogger
|
||||||
|
)
|
||||||
|
|
||||||
|
type settableLogger struct {
|
||||||
|
l grpclog.Logger
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// disable client side logs by default
|
||||||
|
logger.mu.Lock()
|
||||||
|
logger.l = log.New(ioutil.Discard, "", 0)
|
||||||
|
|
||||||
|
// logger has to override the grpclog at initialization so that
|
||||||
|
// any changes to the grpclog go through logger with locking
|
||||||
|
// instead of through SetLogger
|
||||||
|
//
|
||||||
|
// now updates only happen through settableLogger.set
|
||||||
|
grpclog.SetLogger(&logger)
|
||||||
|
logger.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetLogger sets client-side Logger. By default, logs are disabled.
|
||||||
|
func SetLogger(l Logger) {
|
||||||
|
logger.set(l)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetLogger returns the current logger.
|
||||||
|
func GetLogger() Logger {
|
||||||
|
return logger.get()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *settableLogger) set(l Logger) {
|
||||||
|
s.mu.Lock()
|
||||||
|
logger.l = l
|
||||||
|
s.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *settableLogger) get() Logger {
|
||||||
|
s.mu.RLock()
|
||||||
|
l := logger.l
|
||||||
|
s.mu.RUnlock()
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
// implement the grpclog.Logger interface
|
||||||
|
|
||||||
|
func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
|
||||||
|
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
|
||||||
|
func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
|
||||||
|
func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) }
|
||||||
|
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) }
|
||||||
|
func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) }
|
182
vendor/github.com/coreos/etcd/clientv3/maintenance.go
generated
vendored
Normal file
182
vendor/github.com/coreos/etcd/clientv3/maintenance.go
generated
vendored
Normal file
|
@ -0,0 +1,182 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
|
||||||
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
DefragmentResponse pb.DefragmentResponse
|
||||||
|
AlarmResponse pb.AlarmResponse
|
||||||
|
AlarmMember pb.AlarmMember
|
||||||
|
StatusResponse pb.StatusResponse
|
||||||
|
)
|
||||||
|
|
||||||
|
type Maintenance interface {
|
||||||
|
// AlarmList gets all active alarms.
|
||||||
|
AlarmList(ctx context.Context) (*AlarmResponse, error)
|
||||||
|
|
||||||
|
// AlarmDisarm disarms a given alarm.
|
||||||
|
AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
|
||||||
|
|
||||||
|
// Defragment defragments storage backend of the etcd member with given endpoint.
|
||||||
|
// Defragment is only needed when deleting a large number of keys and want to reclaim
|
||||||
|
// the resources.
|
||||||
|
// Defragment is an expensive operation. User should avoid defragmenting multiple members
|
||||||
|
// at the same time.
|
||||||
|
// To defragment multiple members in the cluster, user need to call defragment multiple
|
||||||
|
// times with different endpoints.
|
||||||
|
Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
|
||||||
|
|
||||||
|
// Status gets the status of the endpoint.
|
||||||
|
Status(ctx context.Context, endpoint string) (*StatusResponse, error)
|
||||||
|
|
||||||
|
// Snapshot provides a reader for a snapshot of a backend.
|
||||||
|
Snapshot(ctx context.Context) (io.ReadCloser, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type maintenance struct {
|
||||||
|
dial func(endpoint string) (pb.MaintenanceClient, func(), error)
|
||||||
|
remote pb.MaintenanceClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMaintenance(c *Client) Maintenance {
|
||||||
|
return &maintenance{
|
||||||
|
dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
|
||||||
|
conn, err := c.dial(endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
cancel := func() { conn.Close() }
|
||||||
|
return pb.NewMaintenanceClient(conn), cancel, nil
|
||||||
|
},
|
||||||
|
remote: pb.NewMaintenanceClient(c.conn),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance {
|
||||||
|
return &maintenance{
|
||||||
|
dial: func(string) (pb.MaintenanceClient, func(), error) {
|
||||||
|
return remote, func() {}, nil
|
||||||
|
},
|
||||||
|
remote: remote,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
|
||||||
|
req := &pb.AlarmRequest{
|
||||||
|
Action: pb.AlarmRequest_GET,
|
||||||
|
MemberID: 0, // all
|
||||||
|
Alarm: pb.AlarmType_NONE, // all
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
|
||||||
|
if err == nil {
|
||||||
|
return (*AlarmResponse)(resp), nil
|
||||||
|
}
|
||||||
|
if isHaltErr(ctx, err) {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
|
||||||
|
req := &pb.AlarmRequest{
|
||||||
|
Action: pb.AlarmRequest_DEACTIVATE,
|
||||||
|
MemberID: am.MemberID,
|
||||||
|
Alarm: am.Alarm,
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
|
||||||
|
ar, err := m.AlarmList(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
ret := AlarmResponse{}
|
||||||
|
for _, am := range ar.Alarms {
|
||||||
|
dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
|
||||||
|
if derr != nil {
|
||||||
|
return nil, toErr(ctx, derr)
|
||||||
|
}
|
||||||
|
ret.Alarms = append(ret.Alarms, dresp.Alarms...)
|
||||||
|
}
|
||||||
|
return &ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
|
||||||
|
if err == nil {
|
||||||
|
return (*AlarmResponse)(resp), nil
|
||||||
|
}
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
|
||||||
|
remote, cancel, err := m.dial(endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
defer cancel()
|
||||||
|
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
|
||||||
|
if err != nil {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
return (*DefragmentResponse)(resp), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
|
||||||
|
remote, cancel, err := m.dial(endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
defer cancel()
|
||||||
|
resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
|
||||||
|
if err != nil {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
return (*StatusResponse)(resp), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
|
||||||
|
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
|
||||||
|
if err != nil {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pr, pw := io.Pipe()
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
resp, err := ss.Recv()
|
||||||
|
if err != nil {
|
||||||
|
pw.CloseWithError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if resp == nil && err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if _, werr := pw.Write(resp.Blob); werr != nil {
|
||||||
|
pw.CloseWithError(werr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pw.Close()
|
||||||
|
}()
|
||||||
|
return pr, nil
|
||||||
|
}
|
437
vendor/github.com/coreos/etcd/clientv3/op.go
generated
vendored
Normal file
437
vendor/github.com/coreos/etcd/clientv3/op.go
generated
vendored
Normal file
|
@ -0,0 +1,437 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
|
||||||
|
type opType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// A default Op has opType 0, which is invalid.
|
||||||
|
tRange opType = iota + 1
|
||||||
|
tPut
|
||||||
|
tDeleteRange
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
noPrefixEnd = []byte{0}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Op represents an Operation that kv can execute.
|
||||||
|
type Op struct {
|
||||||
|
t opType
|
||||||
|
key []byte
|
||||||
|
end []byte
|
||||||
|
|
||||||
|
// for range
|
||||||
|
limit int64
|
||||||
|
sort *SortOption
|
||||||
|
serializable bool
|
||||||
|
keysOnly bool
|
||||||
|
countOnly bool
|
||||||
|
minModRev int64
|
||||||
|
maxModRev int64
|
||||||
|
minCreateRev int64
|
||||||
|
maxCreateRev int64
|
||||||
|
|
||||||
|
// for range, watch
|
||||||
|
rev int64
|
||||||
|
|
||||||
|
// for watch, put, delete
|
||||||
|
prevKV bool
|
||||||
|
|
||||||
|
// for put
|
||||||
|
ignoreValue bool
|
||||||
|
ignoreLease bool
|
||||||
|
|
||||||
|
// progressNotify is for progress updates.
|
||||||
|
progressNotify bool
|
||||||
|
// createdNotify is for created event
|
||||||
|
createdNotify bool
|
||||||
|
// filters for watchers
|
||||||
|
filterPut bool
|
||||||
|
filterDelete bool
|
||||||
|
|
||||||
|
// for put
|
||||||
|
val []byte
|
||||||
|
leaseID LeaseID
|
||||||
|
}
|
||||||
|
|
||||||
|
// accesors / mutators
|
||||||
|
|
||||||
|
// KeyBytes returns the byte slice holding the Op's key.
|
||||||
|
func (op Op) KeyBytes() []byte { return op.key }
|
||||||
|
|
||||||
|
// WithKeyBytes sets the byte slice for the Op's key.
|
||||||
|
func (op *Op) WithKeyBytes(key []byte) { op.key = key }
|
||||||
|
|
||||||
|
// RangeBytes returns the byte slice holding with the Op's range end, if any.
|
||||||
|
func (op Op) RangeBytes() []byte { return op.end }
|
||||||
|
|
||||||
|
// WithRangeBytes sets the byte slice for the Op's range end.
|
||||||
|
func (op *Op) WithRangeBytes(end []byte) { op.end = end }
|
||||||
|
|
||||||
|
// ValueBytes returns the byte slice holding the Op's value, if any.
|
||||||
|
func (op Op) ValueBytes() []byte { return op.val }
|
||||||
|
|
||||||
|
// WithValueBytes sets the byte slice for the Op's value.
|
||||||
|
func (op *Op) WithValueBytes(v []byte) { op.val = v }
|
||||||
|
|
||||||
|
func (op Op) toRangeRequest() *pb.RangeRequest {
|
||||||
|
if op.t != tRange {
|
||||||
|
panic("op.t != tRange")
|
||||||
|
}
|
||||||
|
r := &pb.RangeRequest{
|
||||||
|
Key: op.key,
|
||||||
|
RangeEnd: op.end,
|
||||||
|
Limit: op.limit,
|
||||||
|
Revision: op.rev,
|
||||||
|
Serializable: op.serializable,
|
||||||
|
KeysOnly: op.keysOnly,
|
||||||
|
CountOnly: op.countOnly,
|
||||||
|
MinModRevision: op.minModRev,
|
||||||
|
MaxModRevision: op.maxModRev,
|
||||||
|
MinCreateRevision: op.minCreateRev,
|
||||||
|
MaxCreateRevision: op.maxCreateRev,
|
||||||
|
}
|
||||||
|
if op.sort != nil {
|
||||||
|
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
|
||||||
|
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op Op) toRequestOp() *pb.RequestOp {
|
||||||
|
switch op.t {
|
||||||
|
case tRange:
|
||||||
|
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
|
||||||
|
case tPut:
|
||||||
|
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
|
||||||
|
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
|
||||||
|
case tDeleteRange:
|
||||||
|
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
|
||||||
|
return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
|
||||||
|
default:
|
||||||
|
panic("Unknown Op")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op Op) isWrite() bool {
|
||||||
|
return op.t != tRange
|
||||||
|
}
|
||||||
|
|
||||||
|
func OpGet(key string, opts ...OpOption) Op {
|
||||||
|
ret := Op{t: tRange, key: []byte(key)}
|
||||||
|
ret.applyOpts(opts)
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func OpDelete(key string, opts ...OpOption) Op {
|
||||||
|
ret := Op{t: tDeleteRange, key: []byte(key)}
|
||||||
|
ret.applyOpts(opts)
|
||||||
|
switch {
|
||||||
|
case ret.leaseID != 0:
|
||||||
|
panic("unexpected lease in delete")
|
||||||
|
case ret.limit != 0:
|
||||||
|
panic("unexpected limit in delete")
|
||||||
|
case ret.rev != 0:
|
||||||
|
panic("unexpected revision in delete")
|
||||||
|
case ret.sort != nil:
|
||||||
|
panic("unexpected sort in delete")
|
||||||
|
case ret.serializable:
|
||||||
|
panic("unexpected serializable in delete")
|
||||||
|
case ret.countOnly:
|
||||||
|
panic("unexpected countOnly in delete")
|
||||||
|
case ret.minModRev != 0, ret.maxModRev != 0:
|
||||||
|
panic("unexpected mod revision filter in delete")
|
||||||
|
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
|
||||||
|
panic("unexpected create revision filter in delete")
|
||||||
|
case ret.filterDelete, ret.filterPut:
|
||||||
|
panic("unexpected filter in delete")
|
||||||
|
case ret.createdNotify:
|
||||||
|
panic("unexpected createdNotify in delete")
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func OpPut(key, val string, opts ...OpOption) Op {
|
||||||
|
ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
|
||||||
|
ret.applyOpts(opts)
|
||||||
|
switch {
|
||||||
|
case ret.end != nil:
|
||||||
|
panic("unexpected range in put")
|
||||||
|
case ret.limit != 0:
|
||||||
|
panic("unexpected limit in put")
|
||||||
|
case ret.rev != 0:
|
||||||
|
panic("unexpected revision in put")
|
||||||
|
case ret.sort != nil:
|
||||||
|
panic("unexpected sort in put")
|
||||||
|
case ret.serializable:
|
||||||
|
panic("unexpected serializable in put")
|
||||||
|
case ret.countOnly:
|
||||||
|
panic("unexpected countOnly in put")
|
||||||
|
case ret.minModRev != 0, ret.maxModRev != 0:
|
||||||
|
panic("unexpected mod revision filter in put")
|
||||||
|
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
|
||||||
|
panic("unexpected create revision filter in put")
|
||||||
|
case ret.filterDelete, ret.filterPut:
|
||||||
|
panic("unexpected filter in put")
|
||||||
|
case ret.createdNotify:
|
||||||
|
panic("unexpected createdNotify in put")
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func opWatch(key string, opts ...OpOption) Op {
|
||||||
|
ret := Op{t: tRange, key: []byte(key)}
|
||||||
|
ret.applyOpts(opts)
|
||||||
|
switch {
|
||||||
|
case ret.leaseID != 0:
|
||||||
|
panic("unexpected lease in watch")
|
||||||
|
case ret.limit != 0:
|
||||||
|
panic("unexpected limit in watch")
|
||||||
|
case ret.sort != nil:
|
||||||
|
panic("unexpected sort in watch")
|
||||||
|
case ret.serializable:
|
||||||
|
panic("unexpected serializable in watch")
|
||||||
|
case ret.countOnly:
|
||||||
|
panic("unexpected countOnly in watch")
|
||||||
|
case ret.minModRev != 0, ret.maxModRev != 0:
|
||||||
|
panic("unexpected mod revision filter in watch")
|
||||||
|
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
|
||||||
|
panic("unexpected create revision filter in watch")
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op *Op) applyOpts(opts []OpOption) {
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(op)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpOption configures Operations like Get, Put, Delete.
|
||||||
|
type OpOption func(*Op)
|
||||||
|
|
||||||
|
// WithLease attaches a lease ID to a key in 'Put' request.
|
||||||
|
func WithLease(leaseID LeaseID) OpOption {
|
||||||
|
return func(op *Op) { op.leaseID = leaseID }
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLimit limits the number of results to return from 'Get' request.
|
||||||
|
// If WithLimit is given a 0 limit, it is treated as no limit.
|
||||||
|
func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
|
||||||
|
|
||||||
|
// WithRev specifies the store revision for 'Get' request.
|
||||||
|
// Or the start revision of 'Watch' request.
|
||||||
|
func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
|
||||||
|
|
||||||
|
// WithSort specifies the ordering in 'Get' request. It requires
|
||||||
|
// 'WithRange' and/or 'WithPrefix' to be specified too.
|
||||||
|
// 'target' specifies the target to sort by: key, version, revisions, value.
|
||||||
|
// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
|
||||||
|
func WithSort(target SortTarget, order SortOrder) OpOption {
|
||||||
|
return func(op *Op) {
|
||||||
|
if target == SortByKey && order == SortAscend {
|
||||||
|
// If order != SortNone, server fetches the entire key-space,
|
||||||
|
// and then applies the sort and limit, if provided.
|
||||||
|
// Since current mvcc.Range implementation returns results
|
||||||
|
// sorted by keys in lexicographically ascending order,
|
||||||
|
// client should ignore SortOrder if the target is SortByKey.
|
||||||
|
order = SortNone
|
||||||
|
}
|
||||||
|
op.sort = &SortOption{target, order}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPrefixRangeEnd gets the range end of the prefix.
|
||||||
|
// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'.
|
||||||
|
func GetPrefixRangeEnd(prefix string) string {
|
||||||
|
return string(getPrefix([]byte(prefix)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func getPrefix(key []byte) []byte {
|
||||||
|
end := make([]byte, len(key))
|
||||||
|
copy(end, key)
|
||||||
|
for i := len(end) - 1; i >= 0; i-- {
|
||||||
|
if end[i] < 0xff {
|
||||||
|
end[i] = end[i] + 1
|
||||||
|
end = end[:i+1]
|
||||||
|
return end
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// next prefix does not exist (e.g., 0xffff);
|
||||||
|
// default to WithFromKey policy
|
||||||
|
return noPrefixEnd
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
|
||||||
|
// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
|
||||||
|
// can return 'foo1', 'foo2', and so on.
|
||||||
|
func WithPrefix() OpOption {
|
||||||
|
return func(op *Op) {
|
||||||
|
if len(op.key) == 0 {
|
||||||
|
op.key, op.end = []byte{0}, []byte{0}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
op.end = getPrefix(op.key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
|
||||||
|
// For example, 'Get' requests with 'WithRange(end)' returns
|
||||||
|
// the keys in the range [key, end).
|
||||||
|
// endKey must be lexicographically greater than start key.
|
||||||
|
func WithRange(endKey string) OpOption {
|
||||||
|
return func(op *Op) { op.end = []byte(endKey) }
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
|
||||||
|
// to be equal or greater than the key in the argument.
|
||||||
|
func WithFromKey() OpOption { return WithRange("\x00") }
|
||||||
|
|
||||||
|
// WithSerializable makes 'Get' request serializable. By default,
|
||||||
|
// it's linearizable. Serializable requests are better for lower latency
|
||||||
|
// requirement.
|
||||||
|
func WithSerializable() OpOption {
|
||||||
|
return func(op *Op) { op.serializable = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithKeysOnly makes the 'Get' request return only the keys and the corresponding
|
||||||
|
// values will be omitted.
|
||||||
|
func WithKeysOnly() OpOption {
|
||||||
|
return func(op *Op) { op.keysOnly = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCountOnly makes the 'Get' request return only the count of keys.
|
||||||
|
func WithCountOnly() OpOption {
|
||||||
|
return func(op *Op) { op.countOnly = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
|
||||||
|
func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
|
||||||
|
|
||||||
|
// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
|
||||||
|
func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
|
||||||
|
|
||||||
|
// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
|
||||||
|
func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
|
||||||
|
|
||||||
|
// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
|
||||||
|
func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
|
||||||
|
|
||||||
|
// WithFirstCreate gets the key with the oldest creation revision in the request range.
|
||||||
|
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
|
||||||
|
|
||||||
|
// WithLastCreate gets the key with the latest creation revision in the request range.
|
||||||
|
func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
|
||||||
|
|
||||||
|
// WithFirstKey gets the lexically first key in the request range.
|
||||||
|
func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
|
||||||
|
|
||||||
|
// WithLastKey gets the lexically last key in the request range.
|
||||||
|
func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
|
||||||
|
|
||||||
|
// WithFirstRev gets the key with the oldest modification revision in the request range.
|
||||||
|
func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
|
||||||
|
|
||||||
|
// WithLastRev gets the key with the latest modification revision in the request range.
|
||||||
|
func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
|
||||||
|
|
||||||
|
// withTop gets the first key over the get's prefix given a sort order
|
||||||
|
func withTop(target SortTarget, order SortOrder) []OpOption {
|
||||||
|
return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithProgressNotify makes watch server send periodic progress updates
|
||||||
|
// every 10 minutes when there is no incoming events.
|
||||||
|
// Progress updates have zero events in WatchResponse.
|
||||||
|
func WithProgressNotify() OpOption {
|
||||||
|
return func(op *Op) {
|
||||||
|
op.progressNotify = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCreatedNotify makes watch server sends the created event.
|
||||||
|
func WithCreatedNotify() OpOption {
|
||||||
|
return func(op *Op) {
|
||||||
|
op.createdNotify = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithFilterPut discards PUT events from the watcher.
|
||||||
|
func WithFilterPut() OpOption {
|
||||||
|
return func(op *Op) { op.filterPut = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithFilterDelete discards DELETE events from the watcher.
|
||||||
|
func WithFilterDelete() OpOption {
|
||||||
|
return func(op *Op) { op.filterDelete = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
|
||||||
|
// nothing will be returned.
|
||||||
|
func WithPrevKV() OpOption {
|
||||||
|
return func(op *Op) {
|
||||||
|
op.prevKV = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithIgnoreValue updates the key using its current value.
|
||||||
|
// Empty value should be passed when ignore_value is set.
|
||||||
|
// Returns an error if the key does not exist.
|
||||||
|
func WithIgnoreValue() OpOption {
|
||||||
|
return func(op *Op) {
|
||||||
|
op.ignoreValue = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithIgnoreLease updates the key using its current lease.
|
||||||
|
// Empty lease should be passed when ignore_lease is set.
|
||||||
|
// Returns an error if the key does not exist.
|
||||||
|
func WithIgnoreLease() OpOption {
|
||||||
|
return func(op *Op) {
|
||||||
|
op.ignoreLease = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LeaseOp represents an Operation that lease can execute.
|
||||||
|
type LeaseOp struct {
|
||||||
|
id LeaseID
|
||||||
|
|
||||||
|
// for TimeToLive
|
||||||
|
attachedKeys bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// LeaseOption configures lease operations.
|
||||||
|
type LeaseOption func(*LeaseOp)
|
||||||
|
|
||||||
|
func (op *LeaseOp) applyOpts(opts []LeaseOption) {
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(op)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAttachedKeys requests lease timetolive API to return
|
||||||
|
// attached keys of given lease ID.
|
||||||
|
func WithAttachedKeys() LeaseOption {
|
||||||
|
return func(op *LeaseOp) { op.attachedKeys = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
|
||||||
|
ret := &LeaseOp{id: id}
|
||||||
|
ret.applyOpts(opts)
|
||||||
|
return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
|
||||||
|
}
|
293
vendor/github.com/coreos/etcd/clientv3/retry.go
generated
vendored
Normal file
293
vendor/github.com/coreos/etcd/clientv3/retry.go
generated
vendored
Normal file
|
@ -0,0 +1,293 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
)
|
||||||
|
|
||||||
|
type rpcFunc func(ctx context.Context) error
|
||||||
|
type retryRpcFunc func(context.Context, rpcFunc) error
|
||||||
|
|
||||||
|
func (c *Client) newRetryWrapper() retryRpcFunc {
|
||||||
|
return func(rpcCtx context.Context, f rpcFunc) error {
|
||||||
|
for {
|
||||||
|
err := f(rpcCtx)
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
eErr := rpctypes.Error(err)
|
||||||
|
// always stop retry on etcd errors
|
||||||
|
if _, ok := eErr.(rpctypes.EtcdError); ok {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// only retry if unavailable
|
||||||
|
if grpc.Code(err) != codes.Unavailable {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-c.balancer.ConnectNotify():
|
||||||
|
case <-rpcCtx.Done():
|
||||||
|
return rpcCtx.Err()
|
||||||
|
case <-c.ctx.Done():
|
||||||
|
return c.ctx.Err()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) newAuthRetryWrapper() retryRpcFunc {
|
||||||
|
return func(rpcCtx context.Context, f rpcFunc) error {
|
||||||
|
for {
|
||||||
|
err := f(rpcCtx)
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// always stop retry on etcd errors other than invalid auth token
|
||||||
|
if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
|
||||||
|
gterr := c.getToken(rpcCtx)
|
||||||
|
if gterr != nil {
|
||||||
|
return err // return the original error for simplicity
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
|
||||||
|
func RetryKVClient(c *Client) pb.KVClient {
|
||||||
|
retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
|
||||||
|
return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
|
||||||
|
}
|
||||||
|
|
||||||
|
type retryKVClient struct {
|
||||||
|
*retryWriteKVClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
|
||||||
|
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// retryWriteKVClient wraps a KVClient so that each RPC is re-issued
// through retryf according to the client's retry policy.
type retryWriteKVClient struct {
	pb.KVClient
	retryf retryRpcFunc
}

// Put issues a Put RPC through the retry wrapper.
func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.Put(rctx, in, opts...)
		return err
	})
	return resp, err
}

// DeleteRange issues a DeleteRange RPC through the retry wrapper.
func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
		return err
	})
	return resp, err
}

// Txn issues a Txn RPC through the retry wrapper.
func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.Txn(rctx, in, opts...)
		return err
	})
	return resp, err
}

// Compact issues a Compact RPC through the retry wrapper.
func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.Compact(rctx, in, opts...)
		return err
	})
	return resp, err
}
|
||||||
|
|
||||||
|
// retryLeaseClient wraps a LeaseClient so that each RPC is re-issued
// through retryf according to the client's retry policy.
type retryLeaseClient struct {
	pb.LeaseClient
	retryf retryRpcFunc
}

// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
func RetryLeaseClient(c *Client) pb.LeaseClient {
	// The auth-retry wrapper is layered over the connectivity-retry wrapper.
	retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
	return &retryLeaseClient{retry, c.retryAuthWrapper}
}

// LeaseGrant issues a LeaseGrant RPC through the retry wrapper.
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
	err = rlc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
		return err
	})
	return resp, err
}

// LeaseRevoke issues a LeaseRevoke RPC through the retry wrapper.
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
	err = rlc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
		return err
	})
	return resp, err
}
|
||||||
|
|
||||||
|
// retryClusterClient wraps a ClusterClient so that each RPC is re-issued
// through retryf according to the client's retry policy.
type retryClusterClient struct {
	pb.ClusterClient
	retryf retryRpcFunc
}

// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy.
func RetryClusterClient(c *Client) pb.ClusterClient {
	return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper}
}

// MemberAdd issues a MemberAdd RPC through the retry wrapper.
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
	err = rcc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
		return err
	})
	return resp, err
}

// MemberRemove issues a MemberRemove RPC through the retry wrapper.
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
	err = rcc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
		return err
	})
	return resp, err
}

// MemberUpdate issues a MemberUpdate RPC through the retry wrapper.
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
	err = rcc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
		return err
	})
	return resp, err
}
|
||||||
|
|
||||||
|
// retryAuthClient wraps an AuthClient so that each RPC is re-issued
// through retryf according to the client's retry policy.
type retryAuthClient struct {
	pb.AuthClient
	retryf retryRpcFunc
}

// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy.
func RetryAuthClient(c *Client) pb.AuthClient {
	return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper}
}

// AuthEnable issues an AuthEnable RPC through the retry wrapper.
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
		return err
	})
	return resp, err
}

// AuthDisable issues an AuthDisable RPC through the retry wrapper.
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
		return err
	})
	return resp, err
}

// UserAdd issues a UserAdd RPC through the retry wrapper.
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
		return err
	})
	return resp, err
}

// UserDelete issues a UserDelete RPC through the retry wrapper.
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
		return err
	})
	return resp, err
}

// UserChangePassword issues a UserChangePassword RPC through the retry wrapper.
func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
		return err
	})
	return resp, err
}

// UserGrantRole issues a UserGrantRole RPC through the retry wrapper.
func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
		return err
	})
	return resp, err
}

// UserRevokeRole issues a UserRevokeRole RPC through the retry wrapper.
func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
		return err
	})
	return resp, err
}

// RoleAdd issues a RoleAdd RPC through the retry wrapper.
func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
		return err
	})
	return resp, err
}

// RoleDelete issues a RoleDelete RPC through the retry wrapper.
func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
		return err
	})
	return resp, err
}

// RoleGrantPermission issues a RoleGrantPermission RPC through the retry wrapper.
func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
		return err
	})
	return resp, err
}

// RoleRevokePermission issues a RoleRevokePermission RPC through the retry wrapper.
func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
		return err
	})
	return resp, err
}
|
37
vendor/github.com/coreos/etcd/clientv3/sort.go
generated
vendored
Normal file
37
vendor/github.com/coreos/etcd/clientv3/sort.go
generated
vendored
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
// SortTarget selects which field of a key-value pair 'Get' results are
// ordered by.
type SortTarget int

// SortOrder selects the direction of a 'Get' sort.
type SortOrder int

const (
	SortNone SortOrder = iota // no ordering
	SortAscend                // ascending order
	SortDescend               // descending order
)

const (
	SortByKey SortTarget = iota // order by key bytes
	SortByVersion               // order by key version
	SortByCreateRevision        // order by creation revision
	SortByModRevision           // order by last-modification revision
	SortByValue                 // order by value bytes
)

// SortOption pairs a sort target with a sort order for a 'Get' request.
type SortOption struct {
	Target SortTarget
	Order  SortOrder
}
|
164
vendor/github.com/coreos/etcd/clientv3/txn.go
generated
vendored
Normal file
164
vendor/github.com/coreos/etcd/clientv3/txn.go
generated
vendored
Normal file
|
@ -0,0 +1,164 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Txn is the interface that wraps mini-transactions.
//
//	Tx.If(
//	  Compare(Value(k1), ">", v1),
//	  Compare(Version(k1), "=", 2)
//	 ).Then(
//	  OpPut(k2,v2), OpPut(k3,v3)
//	 ).Else(
//	  OpPut(k4,v4), OpPut(k5,v5)
//	 ).Commit()
type Txn interface {
	// If takes a list of comparison. If all comparisons passed in succeed,
	// the operations passed into Then() will be executed. Or the operations
	// passed into Else() will be executed.
	If(cs ...Cmp) Txn

	// Then takes a list of operations. The Ops list will be executed, if the
	// comparisons passed in If() succeed.
	Then(ops ...Op) Txn

	// Else takes a list of operations. The Ops list will be executed, if the
	// comparisons passed in If() fail.
	Else(ops ...Op) Txn

	// Commit tries to commit the transaction.
	Commit() (*TxnResponse, error)
}
|
||||||
|
|
||||||
|
// txn is the concrete Txn, accumulating comparisons and operations until
// Commit sends them as a single TxnRequest.
type txn struct {
	kv  *kv
	ctx context.Context

	// mu guards the construction flags and the accumulated requests.
	mu    sync.Mutex
	cif   bool // If has been called
	cthen bool // Then has been called
	celse bool // Else has been called

	// isWrite is set once any buffered op mutates state; Commit only
	// retries read-only transactions.
	isWrite bool

	cmps []*pb.Compare

	sus []*pb.RequestOp // ops executed when all comparisons succeed
	fas []*pb.RequestOp // ops executed when a comparison fails
}
|
||||||
|
|
||||||
|
func (txn *txn) If(cs ...Cmp) Txn {
|
||||||
|
txn.mu.Lock()
|
||||||
|
defer txn.mu.Unlock()
|
||||||
|
|
||||||
|
if txn.cif {
|
||||||
|
panic("cannot call If twice!")
|
||||||
|
}
|
||||||
|
|
||||||
|
if txn.cthen {
|
||||||
|
panic("cannot call If after Then!")
|
||||||
|
}
|
||||||
|
|
||||||
|
if txn.celse {
|
||||||
|
panic("cannot call If after Else!")
|
||||||
|
}
|
||||||
|
|
||||||
|
txn.cif = true
|
||||||
|
|
||||||
|
for i := range cs {
|
||||||
|
txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
|
||||||
|
}
|
||||||
|
|
||||||
|
return txn
|
||||||
|
}
|
||||||
|
|
||||||
|
func (txn *txn) Then(ops ...Op) Txn {
|
||||||
|
txn.mu.Lock()
|
||||||
|
defer txn.mu.Unlock()
|
||||||
|
|
||||||
|
if txn.cthen {
|
||||||
|
panic("cannot call Then twice!")
|
||||||
|
}
|
||||||
|
if txn.celse {
|
||||||
|
panic("cannot call Then after Else!")
|
||||||
|
}
|
||||||
|
|
||||||
|
txn.cthen = true
|
||||||
|
|
||||||
|
for _, op := range ops {
|
||||||
|
txn.isWrite = txn.isWrite || op.isWrite()
|
||||||
|
txn.sus = append(txn.sus, op.toRequestOp())
|
||||||
|
}
|
||||||
|
|
||||||
|
return txn
|
||||||
|
}
|
||||||
|
|
||||||
|
func (txn *txn) Else(ops ...Op) Txn {
|
||||||
|
txn.mu.Lock()
|
||||||
|
defer txn.mu.Unlock()
|
||||||
|
|
||||||
|
if txn.celse {
|
||||||
|
panic("cannot call Else twice!")
|
||||||
|
}
|
||||||
|
|
||||||
|
txn.celse = true
|
||||||
|
|
||||||
|
for _, op := range ops {
|
||||||
|
txn.isWrite = txn.isWrite || op.isWrite()
|
||||||
|
txn.fas = append(txn.fas, op.toRequestOp())
|
||||||
|
}
|
||||||
|
|
||||||
|
return txn
|
||||||
|
}
|
||||||
|
|
||||||
|
func (txn *txn) Commit() (*TxnResponse, error) {
|
||||||
|
txn.mu.Lock()
|
||||||
|
defer txn.mu.Unlock()
|
||||||
|
for {
|
||||||
|
resp, err := txn.commit()
|
||||||
|
if err == nil {
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
if isHaltErr(txn.ctx, err) {
|
||||||
|
return nil, toErr(txn.ctx, err)
|
||||||
|
}
|
||||||
|
if txn.isWrite {
|
||||||
|
return nil, toErr(txn.ctx, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (txn *txn) commit() (*TxnResponse, error) {
|
||||||
|
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
|
||||||
|
|
||||||
|
var opts []grpc.CallOption
|
||||||
|
if !txn.isWrite {
|
||||||
|
opts = []grpc.CallOption{grpc.FailFast(false)}
|
||||||
|
}
|
||||||
|
resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return (*TxnResponse)(resp), nil
|
||||||
|
}
|
797
vendor/github.com/coreos/etcd/clientv3/watch.go
generated
vendored
Normal file
797
vendor/github.com/coreos/etcd/clientv3/watch.go
generated
vendored
Normal file
|
@ -0,0 +1,797 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// EventTypeDelete and EventTypePut alias the mvcc protobuf event types.
	EventTypeDelete = mvccpb.DELETE
	EventTypePut    = mvccpb.PUT

	// closeSendErrTimeout bounds a close-send wait; the call site is not
	// visible in this chunk — confirm against the stream shutdown path.
	closeSendErrTimeout = 250 * time.Millisecond
)

// Event is a single watch event, aliasing the mvcc protobuf type.
type Event mvccpb.Event

// WatchChan delivers WatchResponses for one Watch call.
type WatchChan <-chan WatchResponse

type Watcher interface {
	// Watch watches on a key or prefix. The watched events will be returned
	// through the returned channel.
	// If the watch is slow or the required rev is compacted, the watch request
	// might be canceled from the server-side and the chan will be closed.
	// 'opts' can be: 'WithRev' and/or 'WithPrefix'.
	Watch(ctx context.Context, key string, opts ...OpOption) WatchChan

	// Close closes the watcher and cancels all watch requests.
	Close() error
}
|
||||||
|
|
||||||
|
// WatchResponse carries one batch of events (or a status notification)
// for a watch.
type WatchResponse struct {
	Header pb.ResponseHeader
	Events []*Event

	// CompactRevision is the minimum revision the watcher may receive.
	CompactRevision int64

	// Canceled is used to indicate watch failure.
	// If the watch failed and the stream was about to close, before the channel is closed,
	// the channel sends a final response that has Canceled set to true with a non-nil Err().
	Canceled bool

	// Created is used to indicate the creation of the watcher.
	Created bool

	// closeErr, when non-nil, is surfaced by Err() ahead of all other states.
	closeErr error

	// cancelReason is a reason of canceling watch
	cancelReason string
}
|
||||||
|
|
||||||
|
// IsCreate returns true if the event tells that the key is newly created.
|
||||||
|
func (e *Event) IsCreate() bool {
|
||||||
|
return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsModify returns true if the event tells that a new value is put on existing key.
|
||||||
|
func (e *Event) IsModify() bool {
|
||||||
|
return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
|
||||||
|
}
|
||||||
|
|
||||||
|
// Err is the error value if this WatchResponse holds an error.
|
||||||
|
func (wr *WatchResponse) Err() error {
|
||||||
|
switch {
|
||||||
|
case wr.closeErr != nil:
|
||||||
|
return v3rpc.Error(wr.closeErr)
|
||||||
|
case wr.CompactRevision != 0:
|
||||||
|
return v3rpc.ErrCompacted
|
||||||
|
case wr.Canceled:
|
||||||
|
if len(wr.cancelReason) != 0 {
|
||||||
|
return v3rpc.Error(grpc.Errorf(codes.FailedPrecondition, "%s", wr.cancelReason))
|
||||||
|
}
|
||||||
|
return v3rpc.ErrFutureRev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsProgressNotify returns true if the WatchResponse is progress notification.
|
||||||
|
func (wr *WatchResponse) IsProgressNotify() bool {
|
||||||
|
return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// watcher implements the Watcher interface.
// It multiplexes all Watch() calls sharing a context onto one grpc stream each.
type watcher struct {
	// remote is the protobuf client used to open grpc watch streams.
	remote pb.WatchClient

	// mu protects the grpc streams map
	mu sync.RWMutex

	// streams holds all the active grpc streams keyed by ctx value.
	// A nil map means the watcher has been closed.
	streams map[string]*watchGrpcStream
}
|
||||||
|
|
||||||
|
// watchGrpcStream tracks all watch resources attached to a single grpc stream.
// Its state is owned by the run() goroutine; other goroutines communicate with
// it only through the channels below.
type watchGrpcStream struct {
	owner  *watcher
	remote pb.WatchClient

	// ctx controls internal remote.Watch requests
	ctx context.Context
	// ctxKey is the key used when looking up this stream's context
	ctxKey string
	cancel context.CancelFunc

	// substreams holds all active watchers on this grpc stream
	substreams map[int64]*watcherStream
	// resuming holds all resuming watchers on this grpc stream
	resuming []*watcherStream

	// reqc sends a watch request from Watch() to the main goroutine
	reqc chan *watchRequest
	// respc receives data from the watch client
	respc chan *pb.WatchResponse
	// donec closes to broadcast shutdown
	donec chan struct{}
	// errc transmits errors from grpc Recv to the watch stream reconn logic
	errc chan error
	// closingc gets the watcherStream of closing watchers
	closingc chan *watcherStream
	// wg is Done when all substream goroutines have exited
	wg sync.WaitGroup

	// resumec closes to signal that all substreams should begin resuming
	resumec chan struct{}
	// closeErr is the error that closed the watch stream
	closeErr error
}
|
||||||
|
|
||||||
|
// watchRequest is issued by the subscriber to start a new watcher.
// It captures the options of one Watch() call so run() can (re)create the
// server-side watcher from it.
type watchRequest struct {
	ctx context.Context
	key string
	end string
	rev int64
	// send created notification event if this field is true
	createdNotify bool
	// progressNotify is for progress updates
	progressNotify bool
	// filters is the list of events to filter out
	filters []pb.WatchCreateRequest_FilterType
	// get the previous key-value pair before the event happens
	prevKV bool
	// retc receives a chan WatchResponse once the watcher is established
	retc chan chan WatchResponse
}
|
||||||
|
|
||||||
|
// watcherStream represents a registered watcher: one subscriber's view of a
// single server-side watch, including its pending event buffer.
type watcherStream struct {
	// initReq is the request that initiated this request
	initReq watchRequest

	// outc publishes watch responses to subscriber
	outc chan WatchResponse
	// recvc buffers watch responses before publishing
	recvc chan *WatchResponse
	// donec closes when the watcherStream goroutine stops.
	donec chan struct{}
	// closing is set to true when stream should be scheduled to shutdown.
	closing bool
	// id is the registered watch id on the grpc stream; -1 while unregistered.
	id int64

	// buf holds all events received from etcd but not yet consumed by the client
	buf []*WatchResponse
}
|
||||||
|
|
||||||
|
func NewWatcher(c *Client) Watcher {
|
||||||
|
return NewWatchFromWatchClient(pb.NewWatchClient(c.conn))
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewWatchFromWatchClient(wc pb.WatchClient) Watcher {
|
||||||
|
return &watcher{
|
||||||
|
remote: wc,
|
||||||
|
streams: make(map[string]*watchGrpcStream),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// valCtxCh is never closed, so a valCtx's Done() blocks forever.
var valCtxCh = make(chan struct{})

// zeroTime is the placeholder deadline reported (with ok=false) by valCtx.
var zeroTime = time.Unix(0, 0)

// valCtx forwards only the values of its parent context; it never expires,
// is never Done, and never reports an error.
type valCtx struct{ context.Context }

func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }

func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }

func (vc *valCtx) Err() error { return nil }
|
||||||
|
|
||||||
|
// newWatcherGrpcStream allocates a watchGrpcStream for inctx and starts its
// run() goroutine. The stream's internal context carries inctx's values but
// is canceled independently (via valCtx), so client-side teardown is explicit.
func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
	ctx, cancel := context.WithCancel(&valCtx{inctx})
	wgs := &watchGrpcStream{
		owner:      w,
		remote:     w.remote,
		ctx:        ctx,
		// ctxKey must match the lookup key computed in Watch().
		ctxKey:     fmt.Sprintf("%v", inctx),
		cancel:     cancel,
		substreams: make(map[int64]*watcherStream),

		respc:    make(chan *pb.WatchResponse),
		reqc:     make(chan *watchRequest),
		donec:    make(chan struct{}),
		// errc is buffered so a failing receiver never blocks on shutdown.
		errc:     make(chan error, 1),
		closingc: make(chan *watcherStream),
		resumec:  make(chan struct{}),
	}
	go wgs.run()
	return wgs
}
|
||||||
|
|
||||||
|
// Watch posts a watch request to run() and waits for a new watcher channel.
// It finds (or creates) the grpc stream keyed by ctx, submits the request,
// and retries against a fresh stream if the old one shut down mid-handshake.
// On failure it returns an already-closed channel rather than an error.
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
	ow := opWatch(key, opts...)

	var filters []pb.WatchCreateRequest_FilterType
	if ow.filterPut {
		filters = append(filters, pb.WatchCreateRequest_NOPUT)
	}
	if ow.filterDelete {
		filters = append(filters, pb.WatchCreateRequest_NODELETE)
	}

	wr := &watchRequest{
		ctx:            ctx,
		createdNotify:  ow.createdNotify,
		key:            string(ow.key),
		end:            string(ow.end),
		rev:            ow.rev,
		progressNotify: ow.progressNotify,
		filters:        filters,
		prevKV:         ow.prevKV,
		// retc is buffered so run() never blocks handing back the channel.
		retc: make(chan chan WatchResponse, 1),
	}

	ok := false
	ctxKey := fmt.Sprintf("%v", ctx)

	// find or allocate appropriate grpc watch stream
	w.mu.Lock()
	if w.streams == nil {
		// closed
		w.mu.Unlock()
		ch := make(chan WatchResponse)
		close(ch)
		return ch
	}
	wgs := w.streams[ctxKey]
	if wgs == nil {
		wgs = w.newWatcherGrpcStream(ctx)
		w.streams[ctxKey] = wgs
	}
	// capture these before unlocking; wgs may be torn down concurrently.
	donec := wgs.donec
	reqc := wgs.reqc
	w.mu.Unlock()

	// couldn't create channel; return closed channel
	closeCh := make(chan WatchResponse, 1)

	// submit request
	select {
	case reqc <- wr:
		ok = true
	case <-wr.ctx.Done():
	case <-donec:
		if wgs.closeErr != nil {
			closeCh <- WatchResponse{closeErr: wgs.closeErr}
			break
		}
		// retry; may have dropped stream from no ctxs
		return w.Watch(ctx, key, opts...)
	}

	// receive channel
	if ok {
		select {
		case ret := <-wr.retc:
			return ret
		case <-ctx.Done():
		case <-donec:
			if wgs.closeErr != nil {
				closeCh <- WatchResponse{closeErr: wgs.closeErr}
				break
			}
			// retry; may have dropped stream from no ctxs
			return w.Watch(ctx, key, opts...)
		}
	}

	close(closeCh)
	return closeCh
}
|
||||||
|
|
||||||
|
func (w *watcher) Close() (err error) {
|
||||||
|
w.mu.Lock()
|
||||||
|
streams := w.streams
|
||||||
|
w.streams = nil
|
||||||
|
w.mu.Unlock()
|
||||||
|
for _, wgs := range streams {
|
||||||
|
if werr := wgs.Close(); werr != nil {
|
||||||
|
err = werr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watchGrpcStream) Close() (err error) {
|
||||||
|
w.cancel()
|
||||||
|
<-w.donec
|
||||||
|
select {
|
||||||
|
case err = <-w.errc:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
return toErr(w.ctx, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watcher) closeStream(wgs *watchGrpcStream) {
|
||||||
|
w.mu.Lock()
|
||||||
|
close(wgs.donec)
|
||||||
|
wgs.cancel()
|
||||||
|
if w.streams != nil {
|
||||||
|
delete(w.streams, wgs.ctxKey)
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
|
||||||
|
if resp.WatchId == -1 {
|
||||||
|
// failed; no channel
|
||||||
|
close(ws.recvc)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ws.id = resp.WatchId
|
||||||
|
w.substreams[ws.id] = ws
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
|
||||||
|
select {
|
||||||
|
case ws.outc <- *resp:
|
||||||
|
case <-ws.initReq.ctx.Done():
|
||||||
|
case <-time.After(closeSendErrTimeout):
|
||||||
|
}
|
||||||
|
close(ws.outc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// closeSubstream finalizes a substream: it unblocks any Watch() caller still
// waiting on retc, closes (or hands off closing of) the subscriber channel,
// and removes the substream from either the active map or the resume queue.
func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
	// send channel response in case stream was never established
	select {
	case ws.initReq.retc <- ws.outc:
	default:
	}
	// close subscriber's channel
	if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
		// deliver the stream-level error asynchronously before closing outc
		go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr})
	} else if ws.outc != nil {
		close(ws.outc)
	}
	// id != -1 means the substream was registered with the server
	if ws.id != -1 {
		delete(w.substreams, ws.id)
		return
	}
	// otherwise it is still in the resume queue; mark its slot abandoned
	for i := range w.resuming {
		if w.resuming[i] == ws {
			w.resuming[i] = nil
			return
		}
	}
}
|
||||||
|
|
||||||
|
// run is the root of the goroutines for managing a watcher client.
// It owns all watchGrpcStream state: it accepts new watch requests, routes
// server responses to substreams, reconnects on recoverable receive errors,
// and on exit tears down every remaining substream before unregistering the
// stream from its owner.
func (w *watchGrpcStream) run() {
	var wc pb.Watch_WatchClient
	var closeErr error

	// substreams marked to close but goroutine still running; needed for
	// avoiding double-closing recvc on grpc stream teardown
	closing := make(map[*watcherStream]struct{})

	defer func() {
		w.closeErr = closeErr
		// shutdown substreams and resuming substreams
		for _, ws := range w.substreams {
			if _, ok := closing[ws]; !ok {
				close(ws.recvc)
				closing[ws] = struct{}{}
			}
		}
		for _, ws := range w.resuming {
			if _, ok := closing[ws]; ws != nil && !ok {
				close(ws.recvc)
				closing[ws] = struct{}{}
			}
		}
		w.joinSubstreams()
		// drain one closingc notification per substream marked closing
		for range closing {
			w.closeSubstream(<-w.closingc)
		}
		w.wg.Wait()
		w.owner.closeStream(w)
	}()

	// start a stream with the etcd grpc server
	if wc, closeErr = w.newWatchClient(); closeErr != nil {
		return
	}

	// cancelSet tracks watch ids we have already sent a cancel for
	cancelSet := make(map[int64]struct{})

	for {
		select {
		// Watch() requested
		case wreq := <-w.reqc:
			outc := make(chan WatchResponse, 1)
			ws := &watcherStream{
				initReq: *wreq,
				id:      -1,
				outc:    outc,
				// unbuffered so resumes won't cause repeat events
				recvc: make(chan *WatchResponse),
			}

			ws.donec = make(chan struct{})
			w.wg.Add(1)
			go w.serveSubstream(ws, w.resumec)

			// queue up for watcher creation/resume
			w.resuming = append(w.resuming, ws)
			if len(w.resuming) == 1 {
				// head of resume queue, can register a new watcher
				wc.Send(ws.initReq.toPB())
			}
		// New events from the watch client
		case pbresp := <-w.respc:
			switch {
			case pbresp.Created:
				// response to head of queue creation
				if ws := w.resuming[0]; ws != nil {
					w.addSubstream(pbresp, ws)
					w.dispatchEvent(pbresp)
					w.resuming[0] = nil
				}
				if ws := w.nextResume(); ws != nil {
					wc.Send(ws.initReq.toPB())
				}
			case pbresp.Canceled:
				delete(cancelSet, pbresp.WatchId)
				if ws, ok := w.substreams[pbresp.WatchId]; ok {
					// signal to stream goroutine to update closingc
					close(ws.recvc)
					closing[ws] = struct{}{}
				}
			default:
				// dispatch to appropriate watch stream
				if ok := w.dispatchEvent(pbresp); ok {
					break
				}
				// watch response on unexpected watch id; cancel id
				if _, ok := cancelSet[pbresp.WatchId]; ok {
					break
				}
				cancelSet[pbresp.WatchId] = struct{}{}
				cr := &pb.WatchRequest_CancelRequest{
					CancelRequest: &pb.WatchCancelRequest{
						WatchId: pbresp.WatchId,
					},
				}
				req := &pb.WatchRequest{RequestUnion: cr}
				wc.Send(req)
			}
		// watch client failed to recv; spawn another if possible
		case err := <-w.errc:
			if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
				closeErr = err
				return
			}
			if wc, closeErr = w.newWatchClient(); closeErr != nil {
				return
			}
			if ws := w.nextResume(); ws != nil {
				wc.Send(ws.initReq.toPB())
			}
			// the new stream knows nothing of previously issued cancels
			cancelSet = make(map[int64]struct{})
		case <-w.ctx.Done():
			return
		case ws := <-w.closingc:
			w.closeSubstream(ws)
			delete(closing, ws)
			if len(w.substreams)+len(w.resuming) == 0 {
				// no more watchers on this stream, shutdown
				return
			}
		}
	}
}
|
||||||
|
|
||||||
|
// nextResume chooses the next resuming to register with the grpc stream. Abandoned
|
||||||
|
// streams are marked as nil in the queue since the head must wait for its inflight registration.
|
||||||
|
func (w *watchGrpcStream) nextResume() *watcherStream {
|
||||||
|
for len(w.resuming) != 0 {
|
||||||
|
if w.resuming[0] != nil {
|
||||||
|
return w.resuming[0]
|
||||||
|
}
|
||||||
|
w.resuming = w.resuming[1:len(w.resuming)]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// dispatchEvent sends a WatchResponse to the appropriate watcher stream.
// It returns false when no substream is registered for the response's watch
// id or when the target substream has already shut down.
func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
	// convert protobuf events to the client-facing Event type
	events := make([]*Event, len(pbresp.Events))
	for i, ev := range pbresp.Events {
		events[i] = (*Event)(ev)
	}
	wr := &WatchResponse{
		Header:          *pbresp.Header,
		Events:          events,
		CompactRevision: pbresp.CompactRevision,
		Created:         pbresp.Created,
		Canceled:        pbresp.Canceled,
		cancelReason:    pbresp.CancelReason,
	}
	ws, ok := w.substreams[pbresp.WatchId]
	if !ok {
		return false
	}
	select {
	case ws.recvc <- wr:
	case <-ws.donec:
		// substream exited before accepting the response
		return false
	}
	return true
}
|
||||||
|
|
||||||
|
// serveWatchClient forwards messages from the grpc stream to run()
|
||||||
|
func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
|
||||||
|
for {
|
||||||
|
resp, err := wc.Recv()
|
||||||
|
if err != nil {
|
||||||
|
select {
|
||||||
|
case w.errc <- err:
|
||||||
|
case <-w.donec:
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case w.respc <- resp:
|
||||||
|
case <-w.donec:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// serveSubstream forwards watch responses from run() to the subscriber.
// It buffers incoming responses, drains the buffer into the subscriber's
// channel, and tracks the next expected revision so the watch can resume at
// the right place after a reconnect. It exits on shutdown, subscriber/stream
// context cancellation, or a resume signal (in which case it does NOT report
// itself as closing).
func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
	if ws.closing {
		panic("created substream goroutine but substream is closing")
	}

	// nextRev is the minimum expected next revision
	nextRev := ws.initReq.rev
	resuming := false
	defer func() {
		if !resuming {
			ws.closing = true
		}
		close(ws.donec)
		if !resuming {
			// notify run() so the substream is finalized via closeSubstream
			w.closingc <- ws
		}
		w.wg.Done()
	}()

	emptyWr := &WatchResponse{}
	for {
		curWr := emptyWr
		outc := ws.outc

		// only enable the send case when there is something buffered to send
		if len(ws.buf) > 0 {
			curWr = ws.buf[0]
		} else {
			outc = nil
		}
		select {
		case outc <- *curWr:
			if ws.buf[0].Err() != nil {
				// terminal response delivered; stop serving
				return
			}
			ws.buf[0] = nil
			ws.buf = ws.buf[1:]
		case wr, ok := <-ws.recvc:
			if !ok {
				// shutdown from closeSubstream
				return
			}

			if wr.Created {
				if ws.initReq.retc != nil {
					ws.initReq.retc <- ws.outc
					// to prevent next write from taking the slot in buffered channel
					// and posting duplicate create events
					ws.initReq.retc = nil

					// send first creation event only if requested
					if ws.initReq.createdNotify {
						ws.outc <- *wr
					}
					// once the watch channel is returned, a current revision
					// watch must resume at the store revision. This is necessary
					// for the following case to work as expected:
					//	wch := m1.Watch("a")
					//	m2.Put("a", "b")
					//	<-wch
					// If the revision is only bound on the first observed event,
					// if wch is disconnected before the Put is issued, then reconnects
					// after it is committed, it'll miss the Put.
					if ws.initReq.rev == 0 {
						nextRev = wr.Header.Revision
					}
				}
			} else {
				// current progress of watch; <= store revision
				nextRev = wr.Header.Revision
			}

			if len(wr.Events) > 0 {
				nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
			}
			ws.initReq.rev = nextRev

			// created event is already sent above,
			// watcher should not post duplicate events
			if wr.Created {
				continue
			}

			// TODO pause channel if buffer gets too large
			ws.buf = append(ws.buf, wr)
		case <-w.ctx.Done():
			return
		case <-ws.initReq.ctx.Done():
			return
		case <-resumec:
			resuming = true
			return
		}
	}
	// lazily send cancel message if events on missing id
}
|
||||||
|
|
||||||
|
// newWatchClient opens a fresh grpc watch stream, migrating every active
// substream into the resume queue so it re-registers on the new stream.
// Substreams are restarted even when the connection fails, so the teardown
// path can shut them down as usual.
func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
	// mark all substreams as resuming
	close(w.resumec)
	w.resumec = make(chan struct{})
	w.joinSubstreams()
	for _, ws := range w.substreams {
		// -1 until the server assigns a new watch id on the new stream
		ws.id = -1
		w.resuming = append(w.resuming, ws)
	}
	// strip out nils, if any
	var resuming []*watcherStream
	for _, ws := range w.resuming {
		if ws != nil {
			resuming = append(resuming, ws)
		}
	}
	w.resuming = resuming
	w.substreams = make(map[int64]*watcherStream)

	// connect to grpc stream while accepting watcher cancelation
	stopc := make(chan struct{})
	donec := w.waitCancelSubstreams(stopc)
	wc, err := w.openWatchClient()
	close(stopc)
	<-donec

	// serve all non-closing streams, even if there's a client error
	// so that the teardown path can shutdown the streams as expected.
	for _, ws := range w.resuming {
		if ws.closing {
			continue
		}
		ws.donec = make(chan struct{})
		w.wg.Add(1)
		go w.serveSubstream(ws, w.resumec)
	}

	if err != nil {
		return nil, v3rpc.Error(err)
	}

	// receive data from new grpc stream
	go w.serveWatchClient(wc)
	return wc, nil
}
|
||||||
|
|
||||||
|
// waitCancelSubstreams watches every resuming substream for cancellation
// while a reconnect is in flight. Canceled substreams are marked closing and
// queued for finalization; the returned channel closes once all watchers have
// either been canceled or released by closing stopc.
func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
	var wg sync.WaitGroup
	wg.Add(len(w.resuming))
	donec := make(chan struct{})
	for i := range w.resuming {
		go func(ws *watcherStream) {
			defer wg.Done()
			if ws.closing {
				// already closing: just close its channel if its ctx ended
				if ws.initReq.ctx.Err() != nil && ws.outc != nil {
					close(ws.outc)
					ws.outc = nil
				}
				return
			}
			select {
			case <-ws.initReq.ctx.Done():
				// closed ws will be removed from resuming
				ws.closing = true
				close(ws.outc)
				ws.outc = nil
				w.wg.Add(1)
				go func() {
					defer w.wg.Done()
					// notify run() off this goroutine to avoid deadlock
					w.closingc <- ws
				}()
			case <-stopc:
			}
		}(w.resuming[i])
	}
	go func() {
		defer close(donec)
		wg.Wait()
	}()
	return donec
}
|
||||||
|
|
||||||
|
// joinSubstream waits for all substream goroutines to complete
|
||||||
|
func (w *watchGrpcStream) joinSubstreams() {
|
||||||
|
for _, ws := range w.substreams {
|
||||||
|
<-ws.donec
|
||||||
|
}
|
||||||
|
for _, ws := range w.resuming {
|
||||||
|
if ws != nil {
|
||||||
|
<-ws.donec
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// openWatchClient retries opening a watchclient until retryConnection fails.
// It loops on transient errors, bailing out when the stream's context is done
// or the error is a halting one.
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
	for {
		// non-blocking check for cancellation before each attempt
		select {
		case <-w.ctx.Done():
			if err == nil {
				return nil, w.ctx.Err()
			}
			return nil, err
		default:
		}
		if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil {
			break
		}
		if isHaltErr(w.ctx, err) {
			return nil, v3rpc.Error(err)
		}
	}
	return ws, nil
}
|
||||||
|
|
||||||
|
// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest)
|
||||||
|
func (wr *watchRequest) toPB() *pb.WatchRequest {
|
||||||
|
req := &pb.WatchCreateRequest{
|
||||||
|
StartRevision: wr.rev,
|
||||||
|
Key: []byte(wr.key),
|
||||||
|
RangeEnd: []byte(wr.end),
|
||||||
|
ProgressNotify: wr.progressNotify,
|
||||||
|
Filters: wr.filters,
|
||||||
|
PrevKv: wr.prevKV,
|
||||||
|
}
|
||||||
|
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
|
||||||
|
return &pb.WatchRequest{RequestUnion: cr}
|
||||||
|
}
|
16
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
generated
vendored
Normal file
16
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction.
|
||||||
|
package rpctypes
|
190
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
generated
vendored
Normal file
190
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
generated
vendored
Normal file
|
@ -0,0 +1,190 @@
|
||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package rpctypes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// server-side error
|
||||||
|
ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
|
||||||
|
ErrGRPCKeyNotFound = grpc.Errorf(codes.InvalidArgument, "etcdserver: key not found")
|
||||||
|
ErrGRPCValueProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: value is provided")
|
||||||
|
ErrGRPCLeaseProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: lease is provided")
|
||||||
|
ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
|
||||||
|
ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
|
||||||
|
ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
|
||||||
|
ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
|
||||||
|
ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
|
||||||
|
|
||||||
|
ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
|
||||||
|
ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
|
||||||
|
|
||||||
|
ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
|
||||||
|
ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
|
||||||
|
ErrGRPCMemberNotEnoughStarted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members")
|
||||||
|
ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
|
||||||
|
ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
|
||||||
|
|
||||||
|
ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
|
||||||
|
ErrGRPCRequestTooManyRequests = grpc.Errorf(codes.ResourceExhausted, "etcdserver: too many requests")
|
||||||
|
|
||||||
|
ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist")
|
||||||
|
ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role")
|
||||||
|
ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
|
||||||
|
ErrGRPCUserEmpty = grpc.Errorf(codes.InvalidArgument, "etcdserver: user name is empty")
|
||||||
|
ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
|
||||||
|
ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
|
||||||
|
ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found")
|
||||||
|
ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
|
||||||
|
ErrGRPCPermissionDenied = grpc.Errorf(codes.PermissionDenied, "etcdserver: permission denied")
|
||||||
|
ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
|
||||||
|
ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
|
||||||
|
ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
|
||||||
|
ErrGRPCInvalidAuthToken = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token")
|
||||||
|
ErrGRPCInvalidAuthMgmt = grpc.Errorf(codes.InvalidArgument, "etcdserver: invalid auth management")
|
||||||
|
|
||||||
|
ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
|
||||||
|
ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
|
||||||
|
ErrGRPCStopped = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped")
|
||||||
|
ErrGRPCTimeout = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out")
|
||||||
|
ErrGRPCTimeoutDueToLeaderFail = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure")
|
||||||
|
ErrGRPCTimeoutDueToConnectionLost = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost")
|
||||||
|
ErrGRPCUnhealthy = grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster")
|
||||||
|
|
||||||
|
errStringToError = map[string]error{
|
||||||
|
grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
|
||||||
|
grpc.ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound,
|
||||||
|
grpc.ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
|
||||||
|
grpc.ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
|
||||||
|
|
||||||
|
grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
|
||||||
|
grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
|
||||||
|
grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
|
||||||
|
grpc.ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
|
||||||
|
grpc.ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
|
||||||
|
|
||||||
|
grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
|
||||||
|
grpc.ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
|
||||||
|
|
||||||
|
grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
|
||||||
|
grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
|
||||||
|
grpc.ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
|
||||||
|
grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
|
||||||
|
grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
|
||||||
|
|
||||||
|
grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
|
||||||
|
grpc.ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
|
||||||
|
|
||||||
|
grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
|
||||||
|
grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
|
||||||
|
grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
|
||||||
|
grpc.ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty,
|
||||||
|
grpc.ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
|
||||||
|
grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
|
||||||
|
grpc.ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
|
||||||
|
grpc.ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed,
|
||||||
|
grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
|
||||||
|
grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
|
||||||
|
grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
|
||||||
|
grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
|
||||||
|
grpc.ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,
|
||||||
|
grpc.ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt,
|
||||||
|
|
||||||
|
grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
|
||||||
|
grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
|
||||||
|
grpc.ErrorDesc(ErrGRPCStopped): ErrGRPCStopped,
|
||||||
|
grpc.ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout,
|
||||||
|
grpc.ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail,
|
||||||
|
grpc.ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
|
||||||
|
grpc.ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy,
|
||||||
|
}
|
||||||
|
|
||||||
|
// client-side error
|
||||||
|
ErrEmptyKey = Error(ErrGRPCEmptyKey)
|
||||||
|
ErrKeyNotFound = Error(ErrGRPCKeyNotFound)
|
||||||
|
ErrValueProvided = Error(ErrGRPCValueProvided)
|
||||||
|
ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
|
||||||
|
ErrTooManyOps = Error(ErrGRPCTooManyOps)
|
||||||
|
ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
|
||||||
|
ErrCompacted = Error(ErrGRPCCompacted)
|
||||||
|
ErrFutureRev = Error(ErrGRPCFutureRev)
|
||||||
|
ErrNoSpace = Error(ErrGRPCNoSpace)
|
||||||
|
|
||||||
|
ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
|
||||||
|
ErrLeaseExist = Error(ErrGRPCLeaseExist)
|
||||||
|
|
||||||
|
ErrMemberExist = Error(ErrGRPCMemberExist)
|
||||||
|
ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
|
||||||
|
ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
|
||||||
|
ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
|
||||||
|
ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
|
||||||
|
|
||||||
|
ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
|
||||||
|
ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
|
||||||
|
|
||||||
|
ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
|
||||||
|
ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
|
||||||
|
ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
|
||||||
|
ErrUserEmpty = Error(ErrGRPCUserEmpty)
|
||||||
|
ErrUserNotFound = Error(ErrGRPCUserNotFound)
|
||||||
|
ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
|
||||||
|
ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
|
||||||
|
ErrAuthFailed = Error(ErrGRPCAuthFailed)
|
||||||
|
ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
|
||||||
|
ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
|
||||||
|
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
|
||||||
|
ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
|
||||||
|
ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)
|
||||||
|
ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt)
|
||||||
|
|
||||||
|
ErrNoLeader = Error(ErrGRPCNoLeader)
|
||||||
|
ErrNotCapable = Error(ErrGRPCNotCapable)
|
||||||
|
ErrStopped = Error(ErrGRPCStopped)
|
||||||
|
ErrTimeout = Error(ErrGRPCTimeout)
|
||||||
|
ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail)
|
||||||
|
ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
|
||||||
|
ErrUnhealthy = Error(ErrGRPCUnhealthy)
|
||||||
|
)
|
||||||
|
|
||||||
|
// EtcdError defines gRPC server errors.
|
||||||
|
// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
|
||||||
|
type EtcdError struct {
|
||||||
|
code codes.Code
|
||||||
|
desc string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code returns grpc/codes.Code.
|
||||||
|
// TODO: define clientv3/codes.Code.
|
||||||
|
func (e EtcdError) Code() codes.Code {
|
||||||
|
return e.code
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e EtcdError) Error() string {
|
||||||
|
return e.desc
|
||||||
|
}
|
||||||
|
|
||||||
|
func Error(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
verr, ok := errStringToError[grpc.ErrorDesc(err)]
|
||||||
|
if !ok { // not gRPC error
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)}
|
||||||
|
}
|
20
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
generated
vendored
Normal file
20
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package rpctypes
|
||||||
|
|
||||||
|
var (
|
||||||
|
MetadataRequireLeaderKey = "hasleader"
|
||||||
|
MetadataHasLeader = "true"
|
||||||
|
)
|
1045
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
generated
vendored
Normal file
1045
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
2094
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
generated
vendored
Normal file
2094
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
16549
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
generated
vendored
Normal file
16549
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
3
vendor/github.com/coreos/etcd/main.go
generated
vendored
3
vendor/github.com/coreos/etcd/main.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
@ -20,7 +20,6 @@
|
||||||
// This package should NOT be extended or modified in any way; to modify the
|
// This package should NOT be extended or modified in any way; to modify the
|
||||||
// etcd binary, work in the `github.com/coreos/etcd/etcdmain` package.
|
// etcd binary, work in the `github.com/coreos/etcd/etcdmain` package.
|
||||||
//
|
//
|
||||||
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import "github.com/coreos/etcd/etcdmain"
|
import "github.com/coreos/etcd/etcdmain"
|
||||||
|
|
735
vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
generated
vendored
Normal file
735
vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,735 @@
|
||||||
|
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: kv.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package mvccpb is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
kv.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
KeyValue
|
||||||
|
Event
|
||||||
|
*/
|
||||||
|
package mvccpb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
|
math "math"
|
||||||
|
|
||||||
|
io "io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
type Event_EventType int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
PUT Event_EventType = 0
|
||||||
|
DELETE Event_EventType = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
var Event_EventType_name = map[int32]string{
|
||||||
|
0: "PUT",
|
||||||
|
1: "DELETE",
|
||||||
|
}
|
||||||
|
var Event_EventType_value = map[string]int32{
|
||||||
|
"PUT": 0,
|
||||||
|
"DELETE": 1,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x Event_EventType) String() string {
|
||||||
|
return proto.EnumName(Event_EventType_name, int32(x))
|
||||||
|
}
|
||||||
|
func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} }
|
||||||
|
|
||||||
|
type KeyValue struct {
|
||||||
|
// key is the key in bytes. An empty key is not allowed.
|
||||||
|
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||||
|
// create_revision is the revision of last creation on this key.
|
||||||
|
CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
|
||||||
|
// mod_revision is the revision of last modification on this key.
|
||||||
|
ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
|
||||||
|
// version is the version of the key. A deletion resets
|
||||||
|
// the version to zero and any modification of the key
|
||||||
|
// increases its version.
|
||||||
|
Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
|
||||||
|
// value is the value held by the key, in bytes.
|
||||||
|
Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
|
||||||
|
// lease is the ID of the lease that attached to key.
|
||||||
|
// When the attached lease expires, the key will be deleted.
|
||||||
|
// If lease is 0, then no lease is attached to the key.
|
||||||
|
Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *KeyValue) Reset() { *m = KeyValue{} }
|
||||||
|
func (m *KeyValue) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*KeyValue) ProtoMessage() {}
|
||||||
|
func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} }
|
||||||
|
|
||||||
|
type Event struct {
|
||||||
|
// type is the kind of event. If type is a PUT, it indicates
|
||||||
|
// new data has been stored to the key. If type is a DELETE,
|
||||||
|
// it indicates the key was deleted.
|
||||||
|
Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
|
||||||
|
// kv holds the KeyValue for the event.
|
||||||
|
// A PUT event contains current kv pair.
|
||||||
|
// A PUT event with kv.Version=1 indicates the creation of a key.
|
||||||
|
// A DELETE/EXPIRE event contains the deleted key with
|
||||||
|
// its modification revision set to the revision of deletion.
|
||||||
|
Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"`
|
||||||
|
// prev_kv holds the key-value pair before the event happens.
|
||||||
|
PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Event) Reset() { *m = Event{} }
|
||||||
|
func (m *Event) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Event) ProtoMessage() {}
|
||||||
|
func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
|
||||||
|
proto.RegisterType((*Event)(nil), "mvccpb.Event")
|
||||||
|
proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
|
||||||
|
}
|
||||||
|
func (m *KeyValue) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Key) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i += copy(dAtA[i:], m.Key)
|
||||||
|
}
|
||||||
|
if m.CreateRevision != 0 {
|
||||||
|
dAtA[i] = 0x10
|
||||||
|
i++
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
|
||||||
|
}
|
||||||
|
if m.ModRevision != 0 {
|
||||||
|
dAtA[i] = 0x18
|
||||||
|
i++
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
|
||||||
|
}
|
||||||
|
if m.Version != 0 {
|
||||||
|
dAtA[i] = 0x20
|
||||||
|
i++
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(m.Version))
|
||||||
|
}
|
||||||
|
if len(m.Value) > 0 {
|
||||||
|
dAtA[i] = 0x2a
|
||||||
|
i++
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
|
||||||
|
i += copy(dAtA[i:], m.Value)
|
||||||
|
}
|
||||||
|
if m.Lease != 0 {
|
||||||
|
dAtA[i] = 0x30
|
||||||
|
i++
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(m.Lease))
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Event) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Event) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.Type != 0 {
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
i++
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(m.Type))
|
||||||
|
}
|
||||||
|
if m.Kv != nil {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size()))
|
||||||
|
n1, err := m.Kv.MarshalTo(dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n1
|
||||||
|
}
|
||||||
|
if m.PrevKv != nil {
|
||||||
|
dAtA[i] = 0x1a
|
||||||
|
i++
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size()))
|
||||||
|
n2, err := m.PrevKv.MarshalTo(dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n2
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Kv(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Kv(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *KeyValue) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Key)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovKv(uint64(l))
|
||||||
|
}
|
||||||
|
if m.CreateRevision != 0 {
|
||||||
|
n += 1 + sovKv(uint64(m.CreateRevision))
|
||||||
|
}
|
||||||
|
if m.ModRevision != 0 {
|
||||||
|
n += 1 + sovKv(uint64(m.ModRevision))
|
||||||
|
}
|
||||||
|
if m.Version != 0 {
|
||||||
|
n += 1 + sovKv(uint64(m.Version))
|
||||||
|
}
|
||||||
|
l = len(m.Value)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovKv(uint64(l))
|
||||||
|
}
|
||||||
|
if m.Lease != 0 {
|
||||||
|
n += 1 + sovKv(uint64(m.Lease))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Event) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.Type != 0 {
|
||||||
|
n += 1 + sovKv(uint64(m.Type))
|
||||||
|
}
|
||||||
|
if m.Kv != nil {
|
||||||
|
l = m.Kv.Size()
|
||||||
|
n += 1 + l + sovKv(uint64(l))
|
||||||
|
}
|
||||||
|
if m.PrevKv != nil {
|
||||||
|
l = m.PrevKv.Size()
|
||||||
|
n += 1 + l + sovKv(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovKv(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozKv(x uint64) (n int) {
|
||||||
|
return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (m *KeyValue) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||||
|
}
|
||||||
|
var byteLen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
byteLen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if byteLen < 0 {
|
||||||
|
return ErrInvalidLengthKv
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
|
||||||
|
if m.Key == nil {
|
||||||
|
m.Key = []byte{}
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
|
||||||
|
}
|
||||||
|
m.CreateRevision = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.CreateRevision |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 3:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
|
||||||
|
}
|
||||||
|
m.ModRevision = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.ModRevision |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 4:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
|
||||||
|
}
|
||||||
|
m.Version = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.Version |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 5:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
|
||||||
|
}
|
||||||
|
var byteLen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
byteLen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if byteLen < 0 {
|
||||||
|
return ErrInvalidLengthKv
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
|
||||||
|
if m.Value == nil {
|
||||||
|
m.Value = []byte{}
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 6:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
|
||||||
|
}
|
||||||
|
m.Lease = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.Lease |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipKv(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthKv
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *Event) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Event: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
|
||||||
|
}
|
||||||
|
m.Type = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.Type |= (Event_EventType(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthKv
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if m.Kv == nil {
|
||||||
|
m.Kv = &KeyValue{}
|
||||||
|
}
|
||||||
|
if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 3:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthKv
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if m.PrevKv == nil {
|
||||||
|
m.PrevKv = &KeyValue{}
|
||||||
|
}
|
||||||
|
if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipKv(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthKv
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipKv(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthKv
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowKv
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipKv(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) }
|
||||||
|
|
||||||
|
var fileDescriptorKv = []byte{
|
||||||
|
// 303 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
|
||||||
|
0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
|
||||||
|
0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
|
||||||
|
0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
|
||||||
|
0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
|
||||||
|
0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
|
||||||
|
0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
|
||||||
|
0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
|
||||||
|
0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
|
||||||
|
0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
|
||||||
|
0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
|
||||||
|
0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
|
||||||
|
0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
|
||||||
|
0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
|
||||||
|
0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
|
||||||
|
0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
|
||||||
|
0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
|
||||||
|
0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
|
||||||
|
0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
|
||||||
|
}
|
140
vendor/github.com/coreos/etcd/pkg/srv/srv.go
generated
vendored
Normal file
140
vendor/github.com/coreos/etcd/pkg/srv/srv.go
generated
vendored
Normal file
|
@ -0,0 +1,140 @@
|
||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package srv looks up DNS SRV records.
|
||||||
|
package srv
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/pkg/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// indirection for testing
|
||||||
|
lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict
|
||||||
|
resolveTCPAddr = net.ResolveTCPAddr
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetCluster gets the cluster information via DNS discovery.
|
||||||
|
// Also sees each entry as a separate instance.
|
||||||
|
func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) {
|
||||||
|
tempName := int(0)
|
||||||
|
tcp2ap := make(map[string]url.URL)
|
||||||
|
|
||||||
|
// First, resolve the apurls
|
||||||
|
for _, url := range apurls {
|
||||||
|
tcpAddr, err := resolveTCPAddr("tcp", url.Host)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
tcp2ap[tcpAddr.String()] = url
|
||||||
|
}
|
||||||
|
|
||||||
|
stringParts := []string{}
|
||||||
|
updateNodeMap := func(service, scheme string) error {
|
||||||
|
_, addrs, err := lookupSRV(service, "tcp", dns)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, srv := range addrs {
|
||||||
|
port := fmt.Sprintf("%d", srv.Port)
|
||||||
|
host := net.JoinHostPort(srv.Target, port)
|
||||||
|
tcpAddr, terr := resolveTCPAddr("tcp", host)
|
||||||
|
if terr != nil {
|
||||||
|
err = terr
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
n := ""
|
||||||
|
url, ok := tcp2ap[tcpAddr.String()]
|
||||||
|
if ok {
|
||||||
|
n = name
|
||||||
|
}
|
||||||
|
if n == "" {
|
||||||
|
n = fmt.Sprintf("%d", tempName)
|
||||||
|
tempName++
|
||||||
|
}
|
||||||
|
// SRV records have a trailing dot but URL shouldn't.
|
||||||
|
shortHost := strings.TrimSuffix(srv.Target, ".")
|
||||||
|
urlHost := net.JoinHostPort(shortHost, port)
|
||||||
|
stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
|
||||||
|
if ok && url.Scheme != scheme {
|
||||||
|
err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(stringParts) == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
failCount := 0
|
||||||
|
err := updateNodeMap(service+"-ssl", "https")
|
||||||
|
srvErr := make([]string, 2)
|
||||||
|
if err != nil {
|
||||||
|
srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err)
|
||||||
|
failCount++
|
||||||
|
}
|
||||||
|
err = updateNodeMap(service, "http")
|
||||||
|
if err != nil {
|
||||||
|
srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err)
|
||||||
|
failCount++
|
||||||
|
}
|
||||||
|
if failCount == 2 {
|
||||||
|
return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1])
|
||||||
|
}
|
||||||
|
return stringParts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type SRVClients struct {
|
||||||
|
Endpoints []string
|
||||||
|
SRVs []*net.SRV
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetClient looks up the client endpoints for a service and domain.
|
||||||
|
func GetClient(service, domain string) (*SRVClients, error) {
|
||||||
|
var urls []*url.URL
|
||||||
|
var srvs []*net.SRV
|
||||||
|
|
||||||
|
updateURLs := func(service, scheme string) error {
|
||||||
|
_, addrs, err := lookupSRV(service, "tcp", domain)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, srv := range addrs {
|
||||||
|
urls = append(urls, &url.URL{
|
||||||
|
Scheme: scheme,
|
||||||
|
Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
srvs = append(srvs, addrs...)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
errHTTPS := updateURLs(service+"-ssl", "https")
|
||||||
|
errHTTP := updateURLs(service, "http")
|
||||||
|
|
||||||
|
if errHTTPS != nil && errHTTP != nil {
|
||||||
|
return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
|
||||||
|
}
|
||||||
|
|
||||||
|
endpoints := make([]string, len(urls))
|
||||||
|
for i := range urls {
|
||||||
|
endpoints[i] = urls[i].String()
|
||||||
|
}
|
||||||
|
return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
|
||||||
|
}
|
2
vendor/github.com/coreos/etcd/pkg/types/doc.go
generated
vendored
2
vendor/github.com/coreos/etcd/pkg/types/doc.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
|
2
vendor/github.com/coreos/etcd/pkg/types/id.go
generated
vendored
2
vendor/github.com/coreos/etcd/pkg/types/id.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
|
2
vendor/github.com/coreos/etcd/pkg/types/set.go
generated
vendored
2
vendor/github.com/coreos/etcd/pkg/types/set.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
|
2
vendor/github.com/coreos/etcd/pkg/types/slice.go
generated
vendored
2
vendor/github.com/coreos/etcd/pkg/types/slice.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
|
14
vendor/github.com/coreos/etcd/pkg/types/urls.go
generated
vendored
14
vendor/github.com/coreos/etcd/pkg/types/urls.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2015 CoreOS, Inc.
|
// Copyright 2015 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
@ -36,8 +36,8 @@ func NewURLs(strs []string) (URLs, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if u.Scheme != "http" && u.Scheme != "https" {
|
if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
|
||||||
return nil, fmt.Errorf("URL scheme must be http or https: %s", in)
|
return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
|
||||||
}
|
}
|
||||||
if _, _, err := net.SplitHostPort(u.Host); err != nil {
|
if _, _, err := net.SplitHostPort(u.Host); err != nil {
|
||||||
return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
|
return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
|
||||||
|
@ -53,6 +53,14 @@ func NewURLs(strs []string) (URLs, error) {
|
||||||
return us, nil
|
return us, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func MustNewURLs(strs []string) URLs {
|
||||||
|
urls, err := NewURLs(strs)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return urls
|
||||||
|
}
|
||||||
|
|
||||||
func (us URLs) String() string {
|
func (us URLs) String() string {
|
||||||
return strings.Join(us.StringSlice(), ",")
|
return strings.Join(us.StringSlice(), ",")
|
||||||
}
|
}
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue