ACME V2 Integration
parent d2766b1b4f
commit 16bb9b6836
72 changed files with 11401 additions and 403 deletions

Gopkg.lock (generated, 17 changes)
@@ -1213,10 +1213,11 @@
revision = "0c8571ac0ce161a5feb57375a9cdf148c98c0f70"

[[projects]]
branch = "master"
branch = "acmev2"
name = "github.com/xenolf/lego"
packages = [
"acme",
"acmev2",
"providers/dns",
"providers/dns/auroradns",
"providers/dns/azure",

@@ -1243,7 +1244,7 @@
"providers/dns/route53",
"providers/dns/vultr"
]
revision = "06a8e7c475c03ef8d4773284ac63357d2810601b"
revision = "a149e7d6506feb4003da7093cbf818c6b75ed4a4"

[[projects]]
branch = "master"

@@ -1423,6 +1424,16 @@
revision = "aa2e30fdd1fe9dd3394119af66451ae790d50e0d"
version = "v1.1.0"

[[projects]]
name = "gopkg.in/square/go-jose.v2"
packages = [
".",
"cipher",
"json"
]
revision = "6ee92191fea850cdcab9a18867abf5f521cdbadb"
version = "v2.1.4"

[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]

@@ -1632,6 +1643,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "ab328aeda9dbd2c4dc87061c25dbfbd151dbdc5946b6f512f676b39bebba8d8e"
inputs-digest = "5643c4ca177618882a194021e8894c3dc32950da646048883151bee925416771"
solver-name = "gps-cdcl"
solver-version = 1

@@ -181,7 +181,7 @@
name = "github.com/vulcand/oxy"

[[constraint]]
branch = "master"
branch = "acmev2"
name = "github.com/xenolf/lego"

[[constraint]]
@@ -56,7 +56,7 @@ _(But if you'd rather configure some of your routes manually, Træfik supports t

- Continuously updates its configuration (No restarts!)
- Supports multiple load balancing algorithms
- Provides HTTPS to your microservices by leveraging [Let's Encrypt](https://letsencrypt.org)
- Provides HTTPS to your microservices by leveraging [Let's Encrypt](https://letsencrypt.org) (wildcard certificates support)
- Circuit breakers, retry
- High Availability with cluster mode (beta)
- See the magic through its clean web UI
@@ -15,7 +15,7 @@ import (
"github.com/containous/traefik/log"
"github.com/containous/traefik/types"
"github.com/xenolf/lego/acme"
acme "github.com/xenolf/lego/acmev2"
)

// Account is used to store lets encrypt registration info

@@ -63,15 +63,14 @@ func (a *Account) Init() error {
}

// NewAccount creates an account
func NewAccount(email string) (*Account, error) {
func NewAccount(email string, certs []*DomainsCertificate) (*Account, error) {
// Create a user. New accounts need an email and private key to start
privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return nil, err
}

domainsCerts := DomainsCertificates{Certs: []*DomainsCertificate{}}

domainsCerts := DomainsCertificates{Certs: certs}
err = domainsCerts.Init()
if err != nil {
return nil, err

@@ -211,7 +210,6 @@ func (dc *DomainsCertificates) addCertificateForDomains(acmeCert *Certificate, d
cert := DomainsCertificate{Domains: domain, Certificate: acmeCert, tlsCert: &tlsCert}
dc.Certs = append(dc.Certs, &cert)

return &cert, nil
}

@@ -220,10 +218,7 @@ func (dc *DomainsCertificates) getCertificateForDomain(domainToFind string) (*Do
defer dc.lock.RUnlock()

for _, domainsCertificate := range dc.Certs {
domains := []string{domainsCertificate.Domains.Main}
domains = append(domains, domainsCertificate.Domains.SANs...)

for _, domain := range domains {
for _, domain := range domainsCertificate.Domains.ToStrArray() {
if domain == domainToFind {
return domainsCertificate, true
}
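The lookup above now relies on `Domains.ToStrArray()` instead of assembling the main domain and its SANs by hand. A minimal sketch of what that helper boils down to, assuming only the `Main`/`SANs` fields the hunk itself uses (the struct shape here is illustrative, not the full `types.Domain` definition):

```go
package main

import "fmt"

// Domain mirrors just the two fields the diff relies on; illustrative only.
type Domain struct {
	Main string
	SANs []string
}

// ToStrArray flattens a Domain into the list getCertificateForDomain iterates over.
func (d Domain) ToStrArray() []string {
	return append([]string{d.Main}, d.SANs...)
}

func main() {
	d := Domain{Main: "example.com", SANs: []string{"www.example.com"}}
	for _, name := range d.ToStrArray() {
		fmt.Println(name) // example.com, then www.example.com
	}
}
```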
acme/acme.go (212 changes)

@@ -10,6 +10,7 @@ import (
"net"
"net/http"
"os"
"reflect"
"regexp"
"strings"
"time"

@@ -26,7 +27,7 @@ import (
"github.com/containous/traefik/tls/generate"
"github.com/containous/traefik/types"
"github.com/eapache/channels"
"github.com/xenolf/lego/acme"
acme "github.com/xenolf/lego/acmev2"
"github.com/xenolf/lego/providers/dns"
)
@@ -184,20 +185,30 @@ func (a *ACME) leadershipListener(elected bool) error {
if err != nil {
return err
}

transaction, object, err := a.store.Begin()
if err != nil {
return err
}

account := object.(*Account)
account.Init()

var needRegister bool
if account == nil || len(account.Email) == 0 {
account, err = NewAccount(a.Email)
domainsCerts := DomainsCertificates{Certs: []*DomainsCertificate{}}
if account != nil {
domainsCerts = account.DomainsCertificate
}

account, err = NewAccount(a.Email, domainsCerts.Certs)
if err != nil {
return err
}

needRegister = true
}

a.client, err = a.buildACMEClient(account)
if err != nil {
return err

@@ -205,29 +216,15 @@ func (a *ACME) leadershipListener(elected bool) error {
if needRegister {
// New users will need to register; be sure to save it
log.Debug("Register...")
reg, err := a.client.Register()

reg, err := a.client.Register(true)
if err != nil {
return err
}

account.Registration = reg
}
// The client has a URL to the current Let's Encrypt Subscriber
// Agreement. The user will need to agree to it.
log.Debug("AgreeToTOS...")
err = a.client.AgreeToTOS()
if err != nil {
log.Debug(err)
// Let's Encrypt Subscriber Agreement renew ?
reg, err := a.client.QueryRegistration()
if err != nil {
return err
}
account.Registration = reg
err = a.client.AgreeToTOS()
if err != nil {
log.Errorf("Error sending ACME agreement to TOS: %+v: %s", account, err.Error())
}
}

err = transaction.Commit(account)
if err != nil {
return err
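With the `acmev2` package, registration and Terms of Service acceptance collapse into a single call, which is why the whole `AgreeToTOS`/`QueryRegistration` block above disappears. A hedged sketch of the call shape only (error handling as in the hunk; the surrounding client setup is assumed to have happened in `buildACMEClient`):

```go
// ACME v1 (github.com/xenolf/lego/acme) needed two steps:
//   reg, err := a.client.Register()
//   err = a.client.AgreeToTOS() // separate TOS agreement, retried on TOS renewal
//
// ACME v2 (github.com/xenolf/lego/acmev2), as used in the hunk above:
reg, err := a.client.Register(true) // true = accept the Terms of Service at registration time
if err != nil {
	return err
}
account.Registration = reg
```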
@@ -265,36 +262,50 @@ func (a *ACME) getCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificat
func (a *ACME) retrieveCertificates() {
a.jobs.In() <- func() {
log.Info("Retrieving ACME certificates...")
for _, domain := range a.Domains {

a.deleteUnnecessaryDomains()

for i := 0; i < len(a.Domains); i++ {
domain := a.Domains[i]

// check if cert isn't already loaded
account := a.store.Get().(*Account)
if _, exists := account.DomainsCertificate.exists(domain); !exists {
domains := []string{}
var domains []string
domains = append(domains, domain.Main)
domains = append(domains, domain.SANs...)
domains, err := a.getValidDomains(domains, true)
if err != nil {
log.Errorf("Error validating ACME certificate for domain %q: %s", domains, err)
continue
}

certificateResource, err := a.getDomainsCertificates(domains)
if err != nil {
log.Errorf("Error getting ACME certificate for domain %s: %s", domains, err.Error())
log.Errorf("Error getting ACME certificate for domain %q: %s", domains, err)
continue
}

transaction, object, err := a.store.Begin()
if err != nil {
log.Errorf("Error creating ACME store transaction from domain %s: %s", domain, err.Error())
log.Errorf("Error creating ACME store transaction from domain %q: %s", domain, err)
continue
}

account = object.(*Account)
_, err = account.DomainsCertificate.addCertificateForDomains(certificateResource, domain)
if err != nil {
log.Errorf("Error adding ACME certificate for domain %s: %s", domains, err.Error())
log.Errorf("Error adding ACME certificate for domain %q: %s", domains, err)
continue
}

if err = transaction.Commit(account); err != nil {
log.Errorf("Error Saving ACME account %+v: %s", account, err.Error())
log.Errorf("Error Saving ACME account %+v: %s", account, err)
continue
}
}
}

log.Info("Retrieved ACME certificates")
}
}

@@ -395,7 +406,7 @@ func dnsOverrideDelay(delay flaeg.Duration) error {

func (a *ACME) buildACMEClient(account *Account) (*acme.Client, error) {
log.Debug("Building ACME client...")
caServer := "https://acme-v01.api.letsencrypt.org/directory"
caServer := "https://acme-v02.api.letsencrypt.org/directory"
if len(a.CAServer) > 0 {
caServer = a.CAServer
}

@@ -418,11 +429,11 @@ func (a *ACME) buildACMEClient(account *Account) (*acme.Client, error) {
return nil, err
}

client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.TLSSNI01})
client.ExcludeChallenges([]acme.Challenge{acme.HTTP01})
err = client.SetChallengeProvider(acme.DNS01, provider)
} else if a.HTTPChallenge != nil && len(a.HTTPChallenge.EntryPoint) > 0 {
log.Debug("Using HTTP Challenge provider.")
client.ExcludeChallenges([]acme.Challenge{acme.DNS01, acme.TLSSNI01})
client.ExcludeChallenges([]acme.Challenge{acme.DNS01})
a.challengeHTTPProvider = &challengeHTTPProvider{store: a.store}
err = client.SetChallengeProvider(acme.HTTP01, a.challengeHTTPProvider)
} else {

@@ -467,13 +478,12 @@ func (a *ACME) LoadCertificateForDomains(domains []string) {
a.jobs.In() <- func() {
log.Debugf("LoadCertificateForDomains %v...", domains)

if len(domains) == 0 {
// no domain
domains, err := a.getValidDomains(domains, false)
if err != nil {
log.Errorf("Error getting valid domain: %v", err)
return
}

domains = fun.Map(types.CanonicalDomain, domains).([]string)

operation := func() error {
if a.client == nil {
return errors.New("ACME client still not built")

@@ -485,7 +495,7 @@ func (a *ACME) LoadCertificateForDomains(domains []string) {
}
ebo := backoff.NewExponentialBackOff()
ebo.MaxElapsedTime = 30 * time.Second
err := backoff.RetryNotify(safe.OperationWithRecover(operation), ebo, notify)
err = backoff.RetryNotify(safe.OperationWithRecover(operation), ebo, notify)
if err != nil {
log.Errorf("Error getting ACME client: %v", err)
return

@@ -547,7 +557,7 @@ func searchProvidedCertificateForDomains(domain string, certs map[string]*tls.Ce
for certDomains := range certs {
domainCheck := false
for _, certDomain := range strings.Split(certDomains, ",") {
selector := "^" + strings.Replace(certDomain, "*.", "[^\\.]*\\.?", -1) + "$"
selector := "^" + strings.Replace(certDomain, "*.", "[^\\.]*\\.", -1) + "$"
domainCheck, _ = regexp.MatchString(selector, domain)
if domainCheck {
break

@@ -586,31 +596,25 @@ func (a *ACME) getUncheckedDomains(domains []string, account *Account) []string
}
}

// Get Configuration Domains
for i := 0; i < len(a.Domains); i++ {
allCerts[a.Domains[i].Main] = &tls.Certificate{}
for _, san := range a.Domains[i].SANs {
allCerts[san] = &tls.Certificate{}
}
}

return searchUncheckedDomains(domains, allCerts)
}

func searchUncheckedDomains(domains []string, certs map[string]*tls.Certificate) []string {
uncheckedDomains := []string{}
var uncheckedDomains []string
for _, domainToCheck := range domains {
domainCheck := false
for certDomains := range certs {
domainCheck = false
for _, certDomain := range strings.Split(certDomains, ",") {
// Use regex to test for provided certs that might have been added into TLSConfig
selector := "^" + strings.Replace(certDomain, "*.", "[^\\.]*\\.?", -1) + "$"
domainCheck, _ = regexp.MatchString(selector, domainToCheck)
if domainCheck {
break
}
}
if domainCheck {
break
}
}
if !domainCheck {
if !isDomainAlreadyChecked(domainToCheck, certs) {
uncheckedDomains = append(uncheckedDomains, domainToCheck)
}
}

if len(uncheckedDomains) == 0 {
log.Debugf("No ACME certificate to generate for domains %q.", domains)
} else {
@@ -646,3 +650,107 @@ func (a *ACME) runJobs() {
}
})
}

// getValidDomains checks if given domain is allowed to generate a ACME certificate and return it
func (a *ACME) getValidDomains(domains []string, wildcardAllowed bool) ([]string, error) {
if len(domains) == 0 || (len(domains) == 1 && len(domains[0]) == 0) {
return nil, errors.New("unable to generate a certificate when no domain is given")
}

if strings.HasPrefix(domains[0], "*") {
if !wildcardAllowed {
return nil, fmt.Errorf("unable to generate a wildcard certificate for domain %q from a 'Host' rule", strings.Join(domains, ","))
}

if a.DNSChallenge == nil && len(a.DNSProvider) == 0 {
return nil, fmt.Errorf("unable to generate a wildcard certificate for domain %q : ACME needs a DNSChallenge", strings.Join(domains, ","))
}

if len(domains) > 1 {
return nil, fmt.Errorf("unable to generate a wildcard certificate for domain %q : SANs are not allowed", strings.Join(domains, ","))
}
} else {
for _, san := range domains[1:] {
if strings.HasPrefix(san, "*") {
return nil, fmt.Errorf("unable to generate a certificate in ACME provider for domains %q: SANs can not be a wildcard domain", strings.Join(domains, ","))
}
}
}

domains = fun.Map(types.CanonicalDomain, domains).([]string)
return domains, nil
}

func isDomainAlreadyChecked(domainToCheck string, existentDomains map[string]*tls.Certificate) bool {
for certDomains := range existentDomains {
for _, certDomain := range strings.Split(certDomains, ",") {
// Use regex to test for provided existentDomains that might have been added into TLSConfig
selector := "^" + strings.Replace(certDomain, "*.", "[^\\.]*\\.", -1) + "$"
domainCheck, err := regexp.MatchString(selector, domainToCheck)
if err != nil {
log.Errorf("Unable to compare %q and %q : %s", domainToCheck, certDomain, err)
continue
}

if domainCheck {
return true
}
}
}
return false
}

// deleteUnnecessaryDomains deletes from the configuration :
// - Duplicated domains
// - Domains which are checked by wildcard domain
func (a *ACME) deleteUnnecessaryDomains() {
var newDomains []types.Domain

for idxDomainToCheck, domainToCheck := range a.Domains {
keepDomain := true

for idxDomain, domain := range a.Domains {
if idxDomainToCheck == idxDomain {
continue
}

if reflect.DeepEqual(domain, domainToCheck) {
if idxDomainToCheck > idxDomain {
log.Warnf("The domain %v is duplicated in the configuration but will be process by ACME only once.", domainToCheck)
keepDomain = false
}
break
} else if strings.HasPrefix(domain.Main, "*") && domain.SANs == nil {
// Check if domains can be validated by the wildcard domain

var newDomainsToCheck []string

// Check if domains can be validated by the wildcard domain
domainsMap := make(map[string]*tls.Certificate)
domainsMap[domain.Main] = &tls.Certificate{}

for _, domainProcessed := range domainToCheck.ToStrArray() {
if isDomainAlreadyChecked(domainProcessed, domainsMap) {
log.Warnf("Domain %q will not be processed by ACME because it is validated by the wildcard %q", domainProcessed, domain.Main)
continue
}
newDomainsToCheck = append(newDomainsToCheck, domainProcessed)
}

// Delete the domain if both Main and SANs can be validated by the wildcard domain
// otherwise keep the unchecked values
if newDomainsToCheck == nil {
keepDomain = false
break
}
domainToCheck.Set(newDomainsToCheck)
}
}

if keepDomain {
newDomains = append(newDomains, domainToCheck)
}
}

a.Domains = newDomains
}
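The wildcard handling in `searchProvidedCertificateForDomains`, `isDomainAlreadyChecked` and `deleteUnnecessaryDomains` all hinges on turning a certificate domain such as `*.local1.com` into a regular expression. A small standalone sketch of that matching rule, using the same `strings.Replace`/`regexp.MatchString` construction as the hunks above (the helper name and sample domains are illustrative):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchesCertDomain reports whether domain is covered by certDomain, where a
// leading "*." in certDomain matches exactly one extra (non-dotted) label.
func matchesCertDomain(domain, certDomain string) bool {
	selector := "^" + strings.Replace(certDomain, "*.", "[^\\.]*\\.", -1) + "$"
	ok, err := regexp.MatchString(selector, domain)
	return err == nil && ok
}

func main() {
	fmt.Println(matchesCertDomain("foo.local1.com", "*.local1.com"))   // true
	fmt.Println(matchesCertDomain("local1.com", "*.local1.com"))       // false: the new pattern requires a subdomain label
	fmt.Println(matchesCertDomain("a.b.local1.com", "*.local1.com"))   // false: only one label is matched by [^\.]*\.
}
```

Note the diff also drops the optional dot (`\\.?` becomes `\\.`) in `searchProvidedCertificateForDomains`, so a `*.local1.com` certificate no longer matches the bare apex `local1.com`.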
@@ -14,7 +14,7 @@ import (
"github.com/containous/traefik/tls/generate"
"github.com/containous/traefik/types"
"github.com/stretchr/testify/assert"
"github.com/xenolf/lego/acme"
acme "github.com/xenolf/lego/acmev2"
)

func TestDomainsSet(t *testing.T) {

@@ -299,10 +299,15 @@ llJh9MC0svjevGtNlxJoE3lmEQIhAKXy1wfZ32/XtcrnENPvi6lzxI0T94X7s5pP3aCoPPoJAiEAl
cijFkALeQp/qyeXdFld2v9gUN3eCgljgcl0QweRoIc=---`)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`{
"new-authz": "https://foo/acme/new-authz",
"new-cert": "https://foo/acme/new-cert",
"new-reg": "https://foo/acme/new-reg",
"revoke-cert": "https://foo/acme/revoke-cert"
"GPHhmRVEDas": "https://community.letsencrypt.org/t/adding-random-entries-to-the-directory/33417",
"keyChange": "https://foo/acme/key-change",
"meta": {
"termsOfService": "https://boulder:4431/terms/v7"
},
"newAccount": "https://foo/acme/new-acct",
"newNonce": "https://foo/acme/new-nonce",
"newOrder": "https://foo/acme/new-order",
"revokeCert": "https://foo/acme/revoke-cert"
}`))
}))
defer ts.Close()

@@ -361,3 +366,81 @@ func TestAcme_getProvidedCertificate(t *testing.T) {
certificate = a.getProvidedCertificate(domain)
assert.Nil(t, certificate)
}

func TestAcme_getValidDomain(t *testing.T) {
testCases := []struct {
desc string
domains []string
wildcardAllowed bool
dnsChallenge *acmeprovider.DNSChallenge
expectedErr string
expectedDomains []string
}{
{
desc: "valid wildcard",
domains: []string{"*.traefik.wtf"},
dnsChallenge: &acmeprovider.DNSChallenge{},
wildcardAllowed: true,
expectedErr: "",
expectedDomains: []string{"*.traefik.wtf"},
},
{
desc: "no wildcard",
domains: []string{"traefik.wtf", "foo.traefik.wtf"},
dnsChallenge: &acmeprovider.DNSChallenge{},
expectedErr: "",
wildcardAllowed: true,
expectedDomains: []string{"traefik.wtf", "foo.traefik.wtf"},
},
{
desc: "unauthorized wildcard",
domains: []string{"*.traefik.wtf"},
dnsChallenge: &acmeprovider.DNSChallenge{},
wildcardAllowed: false,
expectedErr: "unable to generate a wildcard certificate for domain \"*.traefik.wtf\" from a 'Host' rule",
expectedDomains: nil,
},
{
desc: "no domain",
domains: []string{},
dnsChallenge: nil,
wildcardAllowed: true,
expectedErr: "unable to generate a certificate when no domain is given",
expectedDomains: nil,
},
{
desc: "no DNSChallenge",
domains: []string{"*.traefik.wtf", "foo.traefik.wtf"},
dnsChallenge: nil,
wildcardAllowed: true,
expectedErr: "unable to generate a wildcard certificate for domain \"*.traefik.wtf,foo.traefik.wtf\" : ACME needs a DNSChallenge",
expectedDomains: nil,
},
{
desc: "unexpected SANs",
domains: []string{"*.traefik.wtf", "foo.traefik.wtf"},
dnsChallenge: &acmeprovider.DNSChallenge{},
wildcardAllowed: true,
expectedErr: "unable to generate a wildcard certificate for domain \"*.traefik.wtf,foo.traefik.wtf\" : SANs are not allowed",
expectedDomains: nil,
},
}
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()

a := ACME{}
if test.dnsChallenge != nil {
a.DNSChallenge = test.dnsChallenge
}
domains, err := a.getValidDomains(test.domains, test.wildcardAllowed)

if len(test.expectedErr) > 0 {
assert.EqualError(t, err, test.expectedErr, "Unexpected error.")
} else {
assert.Equal(t, len(test.expectedDomains), len(domains), "Unexpected domains.")
}
})
}
}
@@ -9,7 +9,7 @@ import (
"github.com/containous/traefik/cluster"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/xenolf/lego/acme"
acme "github.com/xenolf/lego/acmev2"
)

var _ acme.ChallengeProviderTimeout = (*challengeHTTPProvider)(nil)
@@ -4,6 +4,7 @@ import (
"encoding/json"
"io/ioutil"
"os"
"regexp"

"github.com/containous/traefik/log"
"github.com/containous/traefik/provider/acme"

@@ -45,19 +46,41 @@ func (s *LocalStore) Get() (*Account, error) {
if err := json.Unmarshal(file, &account); err != nil {
return nil, err
}

// Check if ACME Account is in ACME V1 format
if account != nil && account.Registration != nil {
isOldRegistration, err := regexp.MatchString(acme.RegistrationURLPathV1Regexp, account.Registration.URI)
if err != nil {
return nil, err
}

if isOldRegistration {
account.Email = ""
account.Registration = nil
account.PrivateKey = nil
}
}
}

return account, nil
}

// ConvertToNewFormat converts old acme.json format to the new one and store the result into the file (used for the backward compatibility)
func ConvertToNewFormat(fileName string) {
localStore := acme.NewLocalStore(fileName)

storeAccount, err := localStore.GetAccount()
if err != nil {
log.Warnf("Failed to read new account, ACME data conversion is not available : %v", err)
return
}

storeCertificates, err := localStore.GetCertificates()
if err != nil {
log.Warnf("Failed to read new certificates, ACME data conversion is not available : %v", err)
return
}

if storeAccount == nil {
localStore := NewLocalStore(fileName)

@@ -67,8 +90,10 @@ func ConvertToNewFormat(fileName string) {
return
}

if account != nil {
newAccount := &acme.Account{
// Convert ACME data from old to new format
newAccount := &acme.Account{}
if account != nil && len(account.Email) > 0 {
newAccount = &acme.Account{
PrivateKey: account.PrivateKey,
Registration: account.Registration,
Email: account.Email,

@@ -82,9 +107,15 @@ func ConvertToNewFormat(fileName string) {
Domain: cert.Domains,
})
}
newLocalStore := acme.NewLocalStore(fileName)
newLocalStore.SaveDataChan <- &acme.StoredData{Account: newAccount, Certificates: newCertificates}
// If account is in the old format, storeCertificates is nil or empty
// and has to be initialized
storeCertificates = newCertificates
}

// Store the data in new format into the file even if account is nil
// to delete Account in ACME v1 format and keeping the certificates
newLocalStore := acme.NewLocalStore(fileName)
newLocalStore.SaveDataChan <- &acme.StoredData{Account: newAccount, Certificates: storeCertificates}
}
}

@@ -102,15 +133,28 @@ func FromNewToOldFormat(fileName string) (*Account, error) {
return nil, err
}

// Convert ACME Account from new to old format
// (Needed by the KV stores)
var account *Account
if storeAccount != nil {
account := &Account{}
account.Email = storeAccount.Email
account.PrivateKey = storeAccount.PrivateKey
account.Registration = storeAccount.Registration
account.DomainsCertificate = DomainsCertificates{}
account = &Account{
Email: storeAccount.Email,
PrivateKey: storeAccount.PrivateKey,
Registration: storeAccount.Registration,
DomainsCertificate: DomainsCertificates{},
}
}

// Convert ACME Certificates from new to old format
// (Needed by the KV stores)
if len(storeCertificates) > 0 {
// Account can be nil if data are migrated from new format
// with a ACME V1 Account
if account == nil {
account = &Account{}
}
for _, cert := range storeCertificates {
_, err = account.DomainsCertificate.addCertificateForDomains(&Certificate{
_, err := account.DomainsCertificate.addCertificateForDomains(&Certificate{
Domain: cert.Domain.Main,
Certificate: cert.Certificate,
PrivateKey: cert.Key,

@@ -119,7 +163,7 @@ func FromNewToOldFormat(fileName string) (*Account, error) {
return nil, err
}
}
return account, nil
}
return nil, nil

return account, nil
}
@@ -75,36 +75,60 @@ func Run(kv *staert.KvSource, traefikConfiguration *cmd.TraefikConfiguration) fu
}

if traefikConfiguration.GlobalConfiguration.ACME != nil {
account := &acme.Account{}

// Migrate ACME data from file to KV store if needed
if len(traefikConfiguration.GlobalConfiguration.ACME.StorageFile) > 0 {
return migrateACMEData(traefikConfiguration.GlobalConfiguration.ACME.StorageFile, traefikConfiguration.GlobalConfiguration.ACME.Storage, kv)
account, err = migrateACMEData(traefikConfiguration.GlobalConfiguration.ACME.StorageFile)
if err != nil {
return err
}
}

// Store the ACME Account into the KV Store
meta := cluster.NewMetadata(account)
err = meta.Marshall()
if err != nil {
return err
}

source := staert.KvSource{
Store: kv,
Prefix: traefikConfiguration.GlobalConfiguration.ACME.Storage,
}

err = source.StoreConfig(meta)
if err != nil {
return err
}

// Force to delete storagefile
return kv.Delete(kv.Prefix + "/acme/storagefile")
}
return nil
}
}

// migrateACMEData allows migrating data from acme.json file to KV store in function of the file format
func migrateACMEData(fileName, storageKey string, kv *staert.KvSource) error {
var object cluster.Object
func migrateACMEData(fileName string) (*acme.Account, error) {

f, err := os.Open(fileName)
if err != nil {
return err
return nil, err
}
defer f.Close()

file, err := ioutil.ReadAll(f)
if err != nil {
return err
return nil, err
}

// Create an empty account to create all the keys into the KV store
account := &acme.Account{}
// Check if the storage file is not empty before to get data
account := &acme.Account{}
if len(file) > 0 {
accountFromNewFormat, err := acme.FromNewToOldFormat(fileName)
if err != nil {
return err
return nil, err
}

if accountFromNewFormat == nil {

@@ -112,7 +136,7 @@ func migrateACMEData(fileName, storageKey string, kv *staert.KvSource) error {
localStore := acme.NewLocalStore(fileName)
account, err = localStore.Get()
if err != nil {
return err
return nil, err
}
} else {
account = accountFromNewFormat

@@ -122,29 +146,7 @@ func migrateACMEData(fileName, storageKey string, kv *staert.KvSource) error {
}

err = account.Init()
if err != nil {
return err
}

object = account
meta := cluster.NewMetadata(object)
err = meta.Marshall()
if err != nil {
return err
}

source := staert.KvSource{
Store: kv,
Prefix: storageKey,
}

err = source.StoreConfig(meta)
if err != nil {
return err
}

// Force to delete storagefile
return kv.Delete(kv.Prefix + "/acme/storagefile")
return account, err
}

// CreateKvSource creates KvSource
@@ -82,11 +82,12 @@ entryPoint = "https"
# - Leave comment to go to prod.
#
# Optional
# Default: "https://acme-v01.api.letsencrypt.org/directory"
# Default: "https://acme-v02.api.letsencrypt.org/directory"
#
# caServer = "https://acme-staging.api.letsencrypt.org/directory"
# caServer = "https://acme-staging-v02.api.letsencrypt.org/directory"

# Domains list.
# Only domains defined here can generate wildcard certificates.
#
# [[acme.domains]]
#   main = "local1.com"

@@ -111,7 +112,8 @@ entryPoint = "https"
#
entryPoint = "http"

# Use a DNS-01 acme challenge rather than HTTP-01 challenge.
# Use a DNS-01/DNS-02 acme challenge rather than HTTP-01 challenge.
# Note : Mandatory for wildcard certificates generation.
#
# Optional
#

@@ -137,6 +139,10 @@ entryPoint = "https"
If `HTTP-01` challenge is used, `acme.httpChallenge.entryPoint` has to be defined and reachable by Let's Encrypt through the port 80.
These are Let's Encrypt limitations as described on the [community forum](https://community.letsencrypt.org/t/support-for-ports-other-than-80-and-443/3419/72).

!!! note
Wildcard certificates can be generated only if `acme.dnsChallenge`
option is enable.

### Let's Encrypt downtime

Let's Encrypt functionality will be limited until Træfik is restarted.

@@ -215,7 +221,7 @@ Because KV stores (like Consul) have limited entries size, the certificates list
!!! note
It's possible to store up to approximately 100 ACME certificates in Consul.

### `acme.httpChallenge`
### `httpChallenge`

Use `HTTP-01` challenge to generate/renew ACME certificates.

@@ -256,9 +262,9 @@ defaultEntryPoints = ["http", "https"]
`acme.httpChallenge.entryPoint` has to be reachable by Let's Encrypt through the port 80.
It's a Let's Encrypt limitation as described on the [community forum](https://community.letsencrypt.org/t/support-for-ports-other-than-80-and-443/3419/72).

### `acme.dnsChallenge`
### `dnsChallenge`

Use `DNS-01` challenge to generate/renew ACME certificates.
Use `DNS-01/DNS-02` challenge to generate/renew ACME certificates.

```toml
[acme]

@@ -269,6 +275,9 @@ Use `DNS-01` challenge to generate/renew ACME certificates.
# ...
```

!!! note
ACME wildcard certificates can only be generated thanks to a `DNS-02` challenge.

#### `provider`

Select the provider that matches the DNS domain that will host the challenge TXT record, and provide environment variables to enable setting it:

@@ -348,12 +357,16 @@ This will request a certificate from Let's Encrypt for each frontend with a Host

For example, a rule `Host:test1.traefik.io,test2.traefik.io` will request a certificate with main domain `test1.traefik.io` and SAN `test2.traefik.io`.

!!! warning
`onHostRule` option can not be used to generate wildcard certificates.
Refer to [the wildcard generation section](/configuration/acme/#wildcard-domain) for more information.

### `caServer`

```toml
[acme]
# ...
caServer = "https://acme-staging.api.letsencrypt.org/directory"
caServer = "https://acme-staging-v02.api.letsencrypt.org/directory"
# ...
```

@@ -362,7 +375,7 @@ CA server to use.
- Uncomment the line to run on the staging Let's Encrypt server.
- Leave comment to go to prod.

### `acme.domains`
### `domains`

```toml
[acme]

@@ -376,10 +389,22 @@ CA server to use.
[[acme.domains]]
main = "local3.com"
[[acme.domains]]
main = "local4.com"
main = "*.local4.com"
# ...
```

#### Wildcard domains

Wildcard domain has to be defined as a main domain **with no SANs** (alternative domains).
All domains must have A/AAAA records pointing to Træfik.

!!! warning
Note that Let's Encrypt has [rate limiting](https://letsencrypt.org/docs/rate-limits).

Each domain & SANs will lead to a certificate request.

#### Others domains

You can provide SANs (alternative domains) to each main domain.
All domains must have A/AAAA records pointing to Træfik.

@@ -391,9 +416,47 @@ Each domain & SANs will lead to a certificate request.
### `dnsProvider` (Deprecated)

!!! danger "DEPRECATED"
This option is deprecated, use [dnsChallenge.provider](/configuration/acme/#acmednschallenge) instead.
This option is deprecated, use [dnsChallenge.provider](/configuration/acme/#dnschallenge) instead.

### `delayDontCheckDNS` (Deprecated)

!!! danger "DEPRECATED"
This option is deprecated, use [dnsChallenge.delayBeforeCheck](/configuration/acme/#acmednschallenge) instead.
This option is deprecated, use [dnsChallenge.delayBeforeCheck](/configuration/acme/#dnschallenge) instead.

## Wildcard certificates

[ACME V2](https://community.letsencrypt.org/t/acme-v2-and-wildcard-certificate-support-is-live/55579) allows wildcard certificate support.
However, this feature needs a specific configuration.

### DNS-02 Challenge

As described in [Let's Encrypt post](https://community.letsencrypt.org/t/staging-endpoint-for-acme-v2/49605), wildcard certificates can only be generated through a `DNS-02`Challenge.
This challenge is linked to the Træfik option `acme.dnsChallenge`.

```toml
[acme]
# ...
[acme.dnsChallenge]
provider = "digitalocean"
delayBeforeCheck = 0
# ...
```

For more information about this option, please refer to the [dnsChallenge section](/configuration/acme/#dnschallenge).

### Wildcard domain

Wildcard domains can currently be provided only by to the `acme.domains` option.
Theses domains can not have SANs.

```toml
[acme]
# ...
[[acme.domains]]
main = "*local1.com"
[[acme.domains]]
main = "*.local2.com"
# ...
```

For more information about this option, please refer to the [domains section](/configuration/acme/#domains).
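For readers wiring this up programmatically, the constraints this documentation lists (a wildcard must be the main domain, must not carry SANs, and needs a DNS challenge) mirror what `getValidDomains` in `acme/acme.go` enforces. A hedged, standalone restatement of those rules; the function name and error texts are illustrative, not the provider's API:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// checkWildcardRequest restates the documented rules for a certificate request:
// a wildcard must be the first (main) domain, cannot have SANs, and needs a DNS challenge.
func checkWildcardRequest(domains []string, dnsChallengeConfigured bool) error {
	if len(domains) == 0 {
		return errors.New("no domain given")
	}
	if strings.HasPrefix(domains[0], "*") {
		if !dnsChallengeConfigured {
			return fmt.Errorf("wildcard %q requires acme.dnsChallenge", domains[0])
		}
		if len(domains) > 1 {
			return fmt.Errorf("wildcard %q cannot have SANs", domains[0])
		}
	}
	return nil
}

func main() {
	fmt.Println(checkWildcardRequest([]string{"*.local1.com"}, true))                   // <nil>
	fmt.Println(checkWildcardRequest([]string{"*.local1.com"}, false))                  // needs a DNS challenge
	fmt.Println(checkWildcardRequest([]string{"*.local1.com", "foo.local1.com"}, true)) // SANs not allowed
}
```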
@@ -34,7 +34,7 @@ _(But if you'd rather configure some of your routes manually, Træfik supports t

- Continuously updates its configuration (No restarts!)
- Supports multiple load balancing algorithms
- Provides HTTPS to your microservices by leveraging [Let's Encrypt](https://letsencrypt.org)
- Provides HTTPS to your microservices by leveraging [Let's Encrypt](https://letsencrypt.org) (wildcard certificates support)
- Circuit breakers, retry
- High Availability with cluster mode (beta)
- See the magic through its clean web UI
@@ -55,10 +55,6 @@ defaultEntryPoints = ["http", "https"]

## Let's Encrypt support

!!! note
Even if `TLS-SNI-01` challenge is [disabled](https://community.letsencrypt.org/t/2018-01-11-update-regarding-acme-tls-sni-and-shared-hosting-infrastructure/50188), for the moment, it stays the _by default_ ACME Challenge in Træfik but all the examples use the `HTTP-01` challenge (except DNS challenge examples).
If `TLS-SNI-01` challenge is not re-enabled in the future, it we will be removed from Træfik.

### Basic example with HTTP challenge

```toml

@@ -190,10 +186,45 @@ entryPoint = "https"
```

DNS challenge needs environment variables to be executed.
These variables have to be set on the machine/container which host Træfik.
These variables have to be set on the machine/container that host Træfik.

These variables are described [in this section](/configuration/acme/#provider).

### DNS challenge with wildcard domains

```toml
[entryPoints]
[entryPoints.https]
address = ":443"
[entryPoints.https.tls]

[acme]
email = "test@traefik.io"
storage = "acme.json"
caServer = "https://acme-staging-v02.api.letsencrypt.org/directory"
entryPoint = "https"
[acme.dnsChallenge]
provider = "digitalocean" # DNS Provider name (cloudflare, OVH, gandi...)
delayBeforeCheck = 0

[[acme.domains]]
main = "*.local1.com"
[[acme.domains]]
main = "local2.com"
sans = ["test1.local2.com", "test2x.local2.com"]
[[acme.domains]]
main = "*.local3.com"
[[acme.domains]]
main = "*.local4.com"
```

DNS challenge needs environment variables to be executed.
These variables have to be set on the machine/container that host Træfik.

These variables are described [in this section](/configuration/acme/#provider).

More information about wildcard certificates are available [in this section](/configuration/acme/#wildcard-domain).

### OnHostRule option and provided certificates (with HTTP challenge)

```toml
@@ -17,7 +17,7 @@ storage = "/etc/traefik/conf/acme.json"
entryPoint = "https"
onDemand = false
OnHostRule = true
caServer = "http://traefik.boulder.com:4000/directory"
caServer = "http://traefik.boulder.com:4001/directory"
[acme.httpChallenge]
entryPoint="http"
@@ -3,40 +3,50 @@ version: "2"
services :

boulder:
image: containous/boulder:containous-fork
# To minimize fetching this should be the same version used below
image: containous/boulder:containous-acmev2
environment:
FAKE_DNS: 172.17.0.1
PKCS11_PROXY_SOCKET: tcp://boulder-hsm:5657
restart: unless-stopped
extra_hosts:
- le.wtf:127.0.0.1
- boulder:127.0.0.1
ports:
- 4000:4000 # ACME
- 4001:4001 # ACMEv2
- 4002:4002 # OCSP
- 4003:4003 # OCSP
- 4430:4430 # ACME via HTTPS
- 4431:4431 # ACMEv2 via HTTPS
- 4500:4500 # ct-test-srv
- 6000:6000 # gsb-test-srv
- 8000:8000 # debug ports
- 8001:8001
- 8002:8002
- 8003:8003
- 8004:8004
- 8005:8005
- 8006:8006
- 8008:8008
- 8009:8009
- 8010:8010
- 8055:8055 # dns-test-srv updates
- 9380:9380 # mail-test-srv
- 9381:9381 # mail-test-srv
restart: unless-stopped
depends_on:
- bhsm
- bmysql
- brabbitmq
networks:
- default

bhsm:
image: letsencrypt/boulder-tools:2016-11-02
# To minimize fetching this should be the same version used above
image: letsencrypt/boulder-tools:2018-03-07
hostname: boulder-hsm
environment:
PKCS11_DAEMON_SOCKET: tcp://0.0.0.0:5657
command: /usr/local/bin/pkcs11-daemon /usr/lib/softhsm/libsofthsm.so
command: /usr/local/bin/pkcs11-daemon /usr/lib/softhsm/libsofthsm2.so
expose:
- 5657
networks:

@@ -49,21 +59,14 @@ services :
hostname: boulder-mysql
environment:
MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
command: mysqld --bind-address=0.0.0.0
logging:
driver: none
networks:
default:
aliases:
- boulder-mysql

brabbitmq:
image: rabbitmq:3-alpine
hostname: boulder-rabbitmq
environment:
RABBITMQ_NODE_IP_ADDRESS: "0.0.0.0"
networks:
default:
aliases:
- boulder-rabbitmq

## TRAEFIK part ##

traefik:
@@ -32,7 +32,7 @@ init_environment() {
start_boulder() {
init_environment
echo "Start boulder environment"
up_environment bmysql brabbitmq bhsm boulder
up_environment bmysql bhsm boulder
waiting_counter=12
# Not start Traefik if boulder is not started
echo "WAIT for boulder..."
@@ -7,193 +7,195 @@ services:
# CONSUL

consul:
image: progrium/consul
command: -server -bootstrap -log-level debug -ui-dir /ui
ports:
- "8400:8400"
- "8500:8500"
- "8600:53/udp"
expose:
- "8300"
- "8301"
- "8301/udp"
- "8302"
- "8302/udp"
networks:
net:
ipv4_address: 10.0.1.2
image: progrium/consul
command: -server -bootstrap -log-level debug -ui-dir /ui
ports:
- "8400:8400"
- "8500:8500"
- "8600:53/udp"
expose:
- "8300"
- "8301"
- "8301/udp"
- "8302"
- "8302/udp"
networks:
net:
ipv4_address: 10.0.1.2

# ETCD V3

etcd3:
image: quay.io/coreos/etcd:v3.2.9
command: /usr/local/bin/etcd --data-dir=/etcd-data --name node1 --initial-advertise-peer-urls http://10.0.1.12:2380 --listen-peer-urls http://10.0.1.12:2380 --advertise-client-urls http://10.0.1.12:2379,http://10.0.1.12:4001 --listen-client-urls http://10.0.1.12:2379,http://10.0.1.12:4001 --initial-cluster node1=http://10.0.1.12:2380 --debug
ports:
- "4001:4001"
- "2380:2380"
- "2379:2379"
networks:
net:
ipv4_address: 10.0.1.12
image: quay.io/coreos/etcd:v3.2.9
command: /usr/local/bin/etcd --data-dir=/etcd-data --name node1 --initial-advertise-peer-urls http://10.0.1.12:2380 --listen-peer-urls http://10.0.1.12:2380 --advertise-client-urls http://10.0.1.12:2379,http://10.0.1.12:4001 --listen-client-urls http://10.0.1.12:2379,http://10.0.1.12:4001 --initial-cluster node1=http://10.0.1.12:2380 --debug
ports:
- "4001:4001"
- "2380:2380"
- "2379:2379"
networks:
net:
ipv4_address: 10.0.1.12

etcdctl-ping:
image: tenstartups/etcdctl
command: --endpoints=[10.0.1.12:2379] get "traefik/acme/storage"
environment:
ETCDCTL_DIAL_: "TIMEOUT 10s"
ETCDCTL_API : "3"
networks:
image: tenstartups/etcdctl
command: --endpoints=[10.0.1.12:2379] get "traefik/acme/storage"
environment:
ETCDCTL_DIAL_: "TIMEOUT 10s"
ETCDCTL_API : "3"
networks:
- net

## BOULDER part ##

boulder:
image: containous/boulder:containous-fork
environment:
FAKE_DNS: 172.17.0.1
PKCS11_PROXY_SOCKET: tcp://boulder-hsm:5657
extra_hosts:
- le.wtf:127.0.0.1
- boulder:127.0.0.1
ports:
- 4000:4000 # ACME
- 4002:4002 # OCSP
- 4003:4003 # OCSP
- 4500:4500 # ct-test-srv
- 8000:8000 # debug ports
- 8001:8001
- 8002:8002
- 8003:8003
- 8004:8004
- 8055:8055 # dns-test-srv updates
- 9380:9380 # mail-test-srv
- 9381:9381 # mail-test-srv
restart: unless-stopped
depends_on:
- bhsm
- bmysql
- brabbitmq
networks:
net:
ipv4_address: 10.0.1.3
# To minimize fetching this should be the same version used below
image: containous/boulder:containous-acmev2
environment:
FAKE_DNS: 172.17.0.1
PKCS11_PROXY_SOCKET: tcp://boulder-hsm:5657
restart: unless-stopped
extra_hosts:
- le.wtf:127.0.0.1
- boulder:127.0.0.1
ports:
- 4000:4000 # ACME
- 4001:4001 # ACMEv2
- 4002:4002 # OCSP
- 4003:4003 # OCSP
- 4430:4430 # ACME via HTTPS
- 4431:4431 # ACMEv2 via HTTPS
- 4500:4500 # ct-test-srv
- 6000:6000 # gsb-test-srv
- 8000:8000 # debug ports
- 8001:8001
- 8002:8002
- 8003:8003
- 8004:8004
- 8005:8005
- 8006:8006
- 8008:8008
- 8009:8009
- 8010:8010
- 8055:8055 # dns-test-srv updates
- 9380:9380 # mail-test-srv
- 9381:9381 # mail-test-srv
depends_on:
- bhsm
- bmysql
networks:
net:
ipv4_address: 10.0.1.3

bhsm:
image: letsencrypt/boulder-tools:2016-11-02
hostname: boulder-hsm
environment:
PKCS11_DAEMON_SOCKET: tcp://0.0.0.0:5657
command: /usr/local/bin/pkcs11-daemon /usr/lib/softhsm/libsofthsm.so
expose:
- 5657
networks:
net:
ipv4_address: 10.0.1.4
aliases:
- boulder-hsm
# To minimize fetching this should be the same version used above
image: letsencrypt/boulder-tools:2018-03-07
hostname: boulder-hsm
environment:
PKCS11_DAEMON_SOCKET: tcp://0.0.0.0:5657
command: /usr/local/bin/pkcs11-daemon /usr/lib/softhsm/libsofthsm2.so
expose:
- 5657
networks:
net:
ipv4_address: 10.0.1.4
aliases:
- boulder-hsm
bmysql:
image: mariadb:10.1
hostname: boulder-mysql
environment:
MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
networks:
net:
ipv4_address: 10.0.1.5
aliases:
- boulder-mysql

brabbitmq:
image: rabbitmq:3-alpine
hostname: boulder-rabbitmq
environment:
RABBITMQ_NODE_IP_ADDRESS: "0.0.0.0"
networks:
net:
ipv4_address: 10.0.1.6
aliases:
- boulder-rabbitmq
image: mariadb:10.1
hostname: boulder-mysql
environment:
MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
command: mysqld --bind-address=0.0.0.0
logging:
driver: none
networks:
net:
ipv4_address: 10.0.1.5
aliases:
- boulder-mysql

## TRAEFIK part ##

traefik-storeconfig:
build:
context: ../..
image: containous/traefik
volumes:
- "./traefik.toml:/traefik.toml:ro"
command: storeconfig --debug
networks:
- net
storeconfig:
build:
context: ../..
image: containous/traefik
volumes:
- "./traefik.toml:/traefik.toml:ro"
command: storeconfig --debug
networks:
- net

traefik01:
build:
context: ../..
image: containous/traefik
command: ${TRAEFIK_CMD}
extra_hosts:
- traefik.boulder.com:172.17.0.1
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
expose:
- "443"
- "5001"
- "5002"
ports:
- "80:80"
- "8080:8080"
- "443:443"
- "5001:443" # Needed for SNI challenge
- "5002:80" # Needed for HTTP challenge
networks:
net:
ipv4_address: 10.0.1.8
build:
context: ../..
image: containous/traefik
command: ${TRAEFIK_CMD}
extra_hosts:
- traefik.boulder.com:172.17.0.1
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
expose:
- "443"
- "5001"
- "5002"
ports:
- "80:80"
- "8080:8080"
- "443:443"
- "5001:443" # Needed for SNI challenge
- "5002:80" # Needed for HTTP challenge
networks:
net:
ipv4_address: 10.0.1.8

traefik02:
build:
context: ../..
image: containous/traefik
command: ${TRAEFIK_CMD}
extra_hosts:
- traefik.boulder.com:172.17.0.1
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
expose:
- "443"
- "5001"
- "5002"
ports:
- "88:80"
- "8888:8080"
- "8443:443"
depends_on:
- traefik01
networks:
net:
ipv4_address: 10.0.1.9
build:
context: ../..
image: containous/traefik
command: ${TRAEFIK_CMD}
extra_hosts:
- traefik.boulder.com:172.17.0.1
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
expose:
- "443"
- "5001"
- "5002"
ports:
- "88:80"
- "8888:8080"
- "8443:443"
depends_on:
- traefik01
networks:
net:
ipv4_address: 10.0.1.9

whoami01:
image: emilevauge/whoami
expose:
- "80"
labels:
- "traefik.port=80"
- "traefik.backend=wam01"
- "traefik.frontend.rule=Host:who01.localhost.com"
- "traefik.enable=true"
networks:
net:
ipv4_address: 10.0.1.10
image: emilevauge/whoami
expose:
- "80"
labels:
- "traefik.port=80"
- "traefik.backend=wam01"
- "traefik.frontend.rule=Host:who01.localhost.com"
- "traefik.enable=true"
networks:
net:
ipv4_address: 10.0.1.10

whoami02:
image: emilevauge/whoami
expose:
- "80"
labels:
- "traefik.port=80"
- "traefik.backend=wam02"
- "traefik.frontend.rule=Host:who02.localhost.com"
- "traefik.enable=true"
networks:
- net
image: emilevauge/whoami
expose:
- "80"
labels:
- "traefik.port=80"
- "traefik.backend=wam02"
- "traefik.frontend.rule=Host:who02.localhost.com"
- "traefik.enable=true"
networks:
- net

networks:
net:
@@ -74,10 +74,10 @@ start_storeconfig_consul() {
endpoint = "10.0.1.2:8500"
watch = true
prefix = "traefik"' >> $basedir/traefik.toml
up_environment traefik-storeconfig
up_environment storeconfig
rm -f $basedir/traefik.toml
waiting_counter=5
delete_services traefik-storeconfig
delete_services storeconfig

}

@@ -90,7 +90,7 @@ start_storeconfig_etcd3() {
watch = true
prefix = "/traefik"
useAPIV3 = true' >> $basedir/traefik.toml
up_environment traefik-storeconfig
up_environment storeconfig
rm -f $basedir/traefik.toml
waiting_counter=5
# Don't start Traefik store config if ETCD3 is not started

@@ -99,7 +99,7 @@ start_storeconfig_etcd3() {
sleep 5
let waiting_counter-=1
done
delete_services traefik-storeconfig etcdctl-ping
delete_services storeconfig etcdctl-ping
}

start_traefik() {

@@ -136,11 +136,11 @@ start_traefik() {
# Start boulder services
start_boulder() {
echo "Start boulder environment"
up_environment bmysql brabbitmq bhsm boulder
up_environment bmysql bhsm boulder
waiting_counter=12
# Not start Traefik if boulder is not started
echo "WAIT for boulder..."
while [[ -z $(curl -s http://10.0.1.3:4000/directory) ]]; do
while [[ -z $(curl -s http://10.0.1.3:4001/directory) ]]; do
sleep 5
let waiting_counter-=1
if [[ $waiting_counter -eq 0 ]]; then
@@ -14,7 +14,7 @@ email = "test@traefik.io"
storage = "traefik/acme/account"
entryPoint = "https"
OnHostRule = true
caServer = "http://traefik.boulder.com:4000/directory"
caServer = "http://traefik.boulder.com:4001/directory"
[acme.httpChallenge]
entryPoint="http"
@@ -44,7 +44,7 @@ func (s *AcmeSuite) SetUpSuite(c *check.C) {
s.boulderIP = s.composeProject.Container(c, "boulder").NetworkSettings.IPAddress

// wait for boulder
err := try.GetRequest("http://"+s.boulderIP+":4000/directory", 120*time.Second, try.StatusCodeIs(http.StatusOK))
err := try.GetRequest("http://"+s.boulderIP+":4001/directory", 120*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
}
@@ -16,7 +16,7 @@ storage = "/dev/null"
entryPoint = "https"
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4000/directory"
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpchallenge]
entrypoint="http"

@@ -15,7 +15,7 @@ storage = "/dev/null"
entryPoint = "https"
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4000/directory"
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpchallenge]
entrypoint="http"

@@ -18,7 +18,7 @@ storage = "/dev/null"
entryPoint = "https"
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4000/directory"
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"

@@ -16,7 +16,7 @@ storage = "/dev/null"
entryPoint = "https"
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4000/directory"
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"

@@ -17,7 +17,7 @@ email = "test@traefik.io"
storage = "/dev/null"
entryPoint = "https"
OnHostRule = true
caServer = "http://{{.BoulderHost}}:4000/directory"
caServer = "http://{{.BoulderHost}}:4001/directory"
# No challenge defined

[file]

@@ -17,7 +17,7 @@ email = "test@traefik.io"
storage = "/dev/null"
entryPoint = "https"
OnHostRule = true
caServer = "http://wrongurl:4000/directory"
caServer = "http://wrongurl:4001/directory"

[file]

@@ -16,7 +16,7 @@ storage = "/dev/null"
entryPoint = "https"
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4000/directory"
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"
[[acme.domains]]

@@ -16,7 +16,7 @@ storage = "/dev/null"
entryPoint = "https"
onDemand = false
OnHostRule = false
caServer = "http://{{.BoulderHost}}:4000/directory"
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"
[[acme.domains]]

@@ -16,7 +16,7 @@ storage = "/dev/null"
entryPoint = "https"
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4000/directory"
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"
@@ -1,5 +1,5 @@
boulder:
image: containous/boulder:containous-fork
image: containous/boulder:containous-acmev2
environment:
FAKE_DNS: ${DOCKER_HOST_IP}
PKCS11_PROXY_SOCKET: tcp://boulder-hsm:5657

@@ -8,37 +8,42 @@ boulder:
- boulder:127.0.0.1
ports:
- 4000:4000 # ACME
- 4001:4001 # ACMEv2
- 4002:4002 # OCSP
- 4003:4003 # OCSP
- 4430:4430 # ACME via HTTPS
- 4431:4431 # ACMEv2 via HTTPS
- 4500:4500 # ct-test-srv
- 6000:6000 # gsb-test-srv
- 8000:8000 # debug ports
- 8001:8001
- 8002:8002
- 8003:8003
- 8004:8004
- 8005:8005
- 8006:8006
- 8008:8008
- 8009:8009
- 8010:8010
- 8055:8055 # dns-test-srv updates
- 9380:9380 # mail-test-srv
- 9381:9381 # mail-test-srv
links:
- bhsm:boulder-hsm
- bmysql:boulder-mysql
- brabbitmq:boulder-rabbitmq

bhsm:
# To minimize the fetching of various layers this should match
# the FROM image and tag in boulder/Dockerfile
image: letsencrypt/boulder-tools:2016-11-02
image: letsencrypt/boulder-tools:2018-03-07
environment:
PKCS11_DAEMON_SOCKET: tcp://0.0.0.0:5657
command: /usr/local/bin/pkcs11-daemon /usr/lib/softhsm/libsofthsm.so
command: /usr/local/bin/pkcs11-daemon /usr/lib/softhsm/libsofthsm2.so
expose:
- 5657
bmysql:
image: mariadb:10.1
environment:
MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
command: mysqld --bind-address=0.0.0.0
log_driver: none
brabbitmq:
image: rabbitmq:3-alpine
environment:
RABBITMQ_NODE_IP_ADDRESS: "0.0.0.0"
@ -7,7 +7,7 @@ import (
|
|||
"crypto/x509"
|
||||
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/xenolf/lego/acme"
|
||||
acme "github.com/xenolf/lego/acmev2"
|
||||
)
|
||||
|
||||
// Account is used to store lets encrypt registration info
|
||||
|
@ -17,6 +17,11 @@ type Account struct {
|
|||
PrivateKey []byte
|
||||
}
|
||||
|
||||
const (
|
||||
// RegistrationURLPathV1Regexp is a regexp which matches the ACME registration URL in the V1 format
|
||||
RegistrationURLPathV1Regexp string = `^.*/acme/reg/\d+$`
|
||||
)
|
||||
|
||||
// NewAccount creates an account
|
||||
func NewAccount(email string) (*Account, error) {
|
||||
// Create a user. New accounts need an email and private key to start
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"github.com/containous/flaeg"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/xenolf/lego/acme"
|
||||
acme "github.com/xenolf/lego/acmev2"
|
||||
)
|
||||
|
||||
func dnsOverrideDelay(delay flaeg.Duration) error {
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/safe"
|
||||
|
@ -45,6 +46,17 @@ func (s *LocalStore) get() (*StoredData, error) {
|
|||
return nil, err
|
||||
}
|
||||
}
|
||||
// Check if ACME Account is in ACME V1 format
|
||||
if s.storedData.Account != nil && s.storedData.Account.Registration != nil {
|
||||
isOldRegistration, err := regexp.MatchString(RegistrationURLPathV1Regexp, s.storedData.Account.Registration.URI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isOldRegistration {
|
||||
s.storedData.Account = nil
|
||||
s.SaveDataChan <- s.storedData
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return s.storedData, nil
|
||||
|
|
|
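The hunk above resets any stored account whose registration URI still matches the ACME v1 path, forcing re-registration against the v2 endpoint. A minimal standalone sketch of that pattern check, outside the diff; the sample URIs are hypothetical:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as RegistrationURLPathV1Regexp above: ACME v1 registration
// URIs end in /acme/reg/<id>, while v2 account URLs do not.
const registrationURLPathV1Regexp = `^.*/acme/reg/\d+$`

func main() {
	// Hypothetical URIs, for illustration only.
	uris := []string{
		"https://acme-v01.api.letsencrypt.org/acme/reg/12345",  // v1 -> stored account is dropped
		"https://acme-v02.api.letsencrypt.org/acme/acct/12345", // v2 -> stored account is kept
	}
	for _, uri := range uris {
		isOld, err := regexp.MatchString(registrationURLPathV1Regexp, uri)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> old registration: %v\n", uri, isOld)
	}
}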
@ -24,7 +24,7 @@ import (
|
|||
traefikTLS "github.com/containous/traefik/tls"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/xenolf/lego/acme"
|
||||
acme "github.com/xenolf/lego/acmev2"
|
||||
"github.com/xenolf/lego/providers/dns"
|
||||
)
|
||||
|
||||
|
@ -45,7 +45,7 @@ type Configuration struct {
|
|||
OnDemand bool `description:"Enable on demand certificate generation. This will request a certificate from Let's Encrypt during the first TLS handshake for a hostname that does not yet have a certificate."` //deprecated
|
||||
DNSChallenge *DNSChallenge `description:"Activate DNS-01 Challenge"`
|
||||
HTTPChallenge *HTTPChallenge `description:"Activate HTTP-01 Challenge"`
|
||||
Domains []types.Domain `description:"SANs (alternative domains) to each main domain using format: --acme.domains='main.com,san1.com,san2.com' --acme.domains='main.net,san1.net,san2.net'"`
|
||||
Domains []types.Domain `description:"CN and SANs (alternative domains) to each main domain using format: --acme.domains='main.com,san1.com,san2.com' --acme.domains='*.main.net'. No SANs for wildcard domains. Wildcard domains are only accepted with DNSChallenge"`
|
||||
}
|
||||
|
||||
// Provider holds configurations of the provider.
|
||||
|
@ -142,9 +142,9 @@ func (p *Provider) ListenConfiguration(config types.Configuration) {
|
|||
p.configFromListenerChan <- config
|
||||
}
|
||||
|
||||
// ListenRequest resolves new certificates for a domain from an incoming request and retrun a valid Certificate to serve (onDemand option)
|
||||
// ListenRequest resolves new certificates for a domain from an incoming request and return a valid Certificate to serve (onDemand option)
|
||||
func (p *Provider) ListenRequest(domain string) (*tls.Certificate, error) {
|
||||
acmeCert, err := p.resolveCertificate(types.Domain{Main: domain})
|
||||
acmeCert, err := p.resolveCertificate(types.Domain{Main: domain}, false)
|
||||
if acmeCert == nil || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -183,7 +183,7 @@ func (p *Provider) watchNewDomains() {
|
|||
}
|
||||
|
||||
safe.Go(func() {
|
||||
if _, err := p.resolveCertificate(domain); err != nil {
|
||||
if _, err := p.resolveCertificate(domain, false); err != nil {
|
||||
log.Errorf("Unable to obtain ACME certificate for domains %q detected thanks to rule %q : %v", strings.Join(domains, ","), route.Rule, err)
|
||||
}
|
||||
})
|
||||
|
@ -207,16 +207,14 @@ func (p *Provider) SetStaticCertificates(staticCerts map[string]*tls.Certificate
|
|||
p.staticCerts = staticCerts
|
||||
}
|
||||
|
||||
func (p *Provider) resolveCertificate(domain types.Domain) (*acme.CertificateResource, error) {
|
||||
domains := []string{domain.Main}
|
||||
domains = append(domains, domain.SANs...)
|
||||
if len(domains) == 0 {
|
||||
return nil, nil
|
||||
func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurationFile bool) (*acme.CertificateResource, error) {
|
||||
domains, err := p.getValidDomains(domain, domainFromConfigurationFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
domains = fun.Map(types.CanonicalDomain, domains).([]string)
|
||||
|
||||
// Check provided certificates
|
||||
uncheckedDomains := p.getUncheckedDomains(domains)
|
||||
uncheckedDomains := p.getUncheckedDomains(domains, !domainFromConfigurationFile)
|
||||
if len(uncheckedDomains) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -255,7 +253,7 @@ func (p *Provider) getClient() (*acme.Client, error) {
|
|||
}
|
||||
|
||||
log.Debug("Building ACME client...")
|
||||
caServer := "https://acme-v01.api.letsencrypt.org/directory"
|
||||
caServer := "https://acme-v02.api.letsencrypt.org/directory"
|
||||
if len(p.CAServer) > 0 {
|
||||
caServer = p.CAServer
|
||||
}
|
||||
|
@ -267,28 +265,13 @@ func (p *Provider) getClient() (*acme.Client, error) {
|
|||
if account.GetRegistration() == nil {
|
||||
// New users will need to register; be sure to save it
|
||||
log.Info("Register...")
|
||||
reg, err := client.Register()
|
||||
reg, err := client.Register(true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
account.Registration = reg
|
||||
}
|
||||
|
||||
log.Debug("AgreeToTOS...")
|
||||
err = client.AgreeToTOS()
|
||||
if err != nil {
|
||||
// Let's Encrypt Subscriber Agreement renew ?
|
||||
reg, err := client.QueryRegistration()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
account.Registration = reg
|
||||
err = client.AgreeToTOS()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error sending ACME agreement to TOS: %+v: %v", account, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Save the account once before all the certificates generation/storing
|
||||
// No certificate can be generated if account is not initialized
|
||||
err = p.Store.SaveAccount(account)
|
||||
|
@ -310,14 +293,14 @@ func (p *Provider) getClient() (*acme.Client, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.TLSSNI01})
|
||||
client.ExcludeChallenges([]acme.Challenge{acme.HTTP01})
|
||||
err = client.SetChallengeProvider(acme.DNS01, provider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if p.HTTPChallenge != nil && len(p.HTTPChallenge.EntryPoint) > 0 {
|
||||
log.Debug("Using HTTP Challenge provider.")
|
||||
client.ExcludeChallenges([]acme.Challenge{acme.DNS01, acme.TLSSNI01})
|
||||
client.ExcludeChallenges([]acme.Challenge{acme.DNS01})
|
||||
err = client.SetChallengeProvider(acme.HTTP01, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -353,12 +336,13 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
|
|||
p.configurationChan = configurationChan
|
||||
p.refreshCertificates()
|
||||
|
||||
for _, domain := range p.Domains {
|
||||
p.deleteUnnecessaryDomains()
|
||||
for i := 0; i < len(p.Domains); i++ {
|
||||
domain := p.Domains[i]
|
||||
safe.Go(func() {
|
||||
if _, err := p.resolveCertificate(domain); err != nil {
|
||||
domains := []string{domain.Main}
|
||||
domains = append(domains, domain.SANs...)
|
||||
log.Errorf("Unable to obtain ACME certificate for domains %q : %v", domains, err)
|
||||
if _, err := p.resolveCertificate(domain, true); err != nil {
|
||||
log.Errorf("Unable to obtain ACME certificate for domains %q : %v", strings.Join(domain.ToStrArray(), ","), err)
|
||||
} else {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -506,51 +490,48 @@ func (p *Provider) AddRoutes(router *mux.Router) {
|
|||
|
||||
// Get provided certificates which check a domain list (Main and SANs)
|
||||
// from static and dynamic provided certificates
|
||||
func (p *Provider) getUncheckedDomains(domains []string) []string {
|
||||
log.Debugf("Looking for provided certificate(s) to validate %q...", domains)
|
||||
allCerts := make(map[string]*tls.Certificate)
|
||||
func (p *Provider) getUncheckedDomains(domainsToCheck []string, checkConfigurationDomains bool) []string {
|
||||
log.Debugf("Looking for provided certificate(s) to validate %q...", domainsToCheck)
|
||||
var allCerts []string
|
||||
|
||||
// Get static certificates
|
||||
for domains, certificate := range p.staticCerts {
|
||||
allCerts[domains] = certificate
|
||||
for domains := range p.staticCerts {
|
||||
allCerts = append(allCerts, domains)
|
||||
}
|
||||
|
||||
// Get dynamic certificates
|
||||
if p.dynamicCerts != nil && p.dynamicCerts.Get() != nil {
|
||||
for domains, certificate := range p.dynamicCerts.Get().(map[string]*tls.Certificate) {
|
||||
allCerts[domains] = certificate
|
||||
for domains := range p.dynamicCerts.Get().(map[string]*tls.Certificate) {
|
||||
allCerts = append(allCerts, domains)
|
||||
}
|
||||
}
|
||||
|
||||
return searchUncheckedDomains(domains, allCerts)
|
||||
// Get ACME certificates
|
||||
for _, certificate := range p.certificates {
|
||||
allCerts = append(allCerts, strings.Join(certificate.Domain.ToStrArray(), ","))
|
||||
}
|
||||
|
||||
// Get Configuration Domains
|
||||
if checkConfigurationDomains {
|
||||
for i := 0; i < len(p.Domains); i++ {
|
||||
allCerts = append(allCerts, strings.Join(p.Domains[i].ToStrArray(), ","))
|
||||
}
|
||||
}
|
||||
|
||||
return searchUncheckedDomains(domainsToCheck, allCerts)
|
||||
}
|
||||
|
||||
func searchUncheckedDomains(domains []string, certs map[string]*tls.Certificate) []string {
|
||||
func searchUncheckedDomains(domainsToCheck []string, existentDomains []string) []string {
|
||||
uncheckedDomains := []string{}
|
||||
for _, domainToCheck := range domains {
|
||||
domainCheck := false
|
||||
for certDomains := range certs {
|
||||
domainCheck = false
|
||||
for _, certDomain := range strings.Split(certDomains, ",") {
|
||||
// Use regex to test for provided certs that might have been added into TLSConfig
|
||||
selector := "^" + strings.Replace(certDomain, "*.", "[^\\.]*\\.?", -1) + "$"
|
||||
domainCheck, _ = regexp.MatchString(selector, domainToCheck)
|
||||
if domainCheck {
|
||||
break
|
||||
}
|
||||
}
|
||||
if domainCheck {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !domainCheck {
|
||||
for _, domainToCheck := range domainsToCheck {
|
||||
if !isDomainAlreadyChecked(domainToCheck, existentDomains) {
|
||||
uncheckedDomains = append(uncheckedDomains, domainToCheck)
|
||||
}
|
||||
}
|
||||
if len(uncheckedDomains) == 0 {
|
||||
log.Debugf("No ACME certificate to generate for domains %q.", domains)
|
||||
log.Debugf("No ACME certificate to generate for domains %q.", domainsToCheck)
|
||||
} else {
|
||||
log.Debugf("Domains %q need ACME certificates generation for domains %q.", domains, strings.Join(uncheckedDomains, ","))
|
||||
log.Debugf("Domains %q need ACME certificates generation for domains %q.", domainsToCheck, strings.Join(uncheckedDomains, ","))
|
||||
}
|
||||
return uncheckedDomains
|
||||
}
|
||||
|
@ -571,3 +552,98 @@ func getX509Certificate(certificate *Certificate) (*x509.Certificate, error) {
|
|||
}
|
||||
return crt, err
|
||||
}
|
||||
|
||||
// getValidDomains checks if the given domain is allowed to generate an ACME certificate and returns it
|
||||
func (p *Provider) getValidDomains(domain types.Domain, wildcardAllowed bool) ([]string, error) {
|
||||
domains := domain.ToStrArray()
|
||||
if len(domains) == 0 {
|
||||
return nil, errors.New("unable to generate a certificate in ACME provider when no domain is given")
|
||||
}
|
||||
if strings.HasPrefix(domain.Main, "*") {
|
||||
if !wildcardAllowed {
|
||||
return nil, fmt.Errorf("unable to generate a wildcard certificate in ACME provider for domain %q from a 'Host' rule", strings.Join(domains, ","))
|
||||
}
|
||||
if p.DNSChallenge == nil {
|
||||
return nil, fmt.Errorf("unable to generate a wildcard certificate in ACME provider for domain %q : ACME needs a DNSChallenge", strings.Join(domains, ","))
|
||||
}
|
||||
if len(domain.SANs) > 0 {
|
||||
return nil, fmt.Errorf("unable to generate a wildcard certificate in ACME provider for domain %q : SANs are not allowed", strings.Join(domains, ","))
|
||||
}
|
||||
} else {
|
||||
for _, san := range domain.SANs {
|
||||
if strings.HasPrefix(san, "*") {
|
||||
return nil, fmt.Errorf("unable to generate a certificate in ACME provider for domains %q: SANs can not be a wildcard domain", strings.Join(domains, ","))
|
||||
}
|
||||
}
|
||||
}
|
||||
domains = fun.Map(types.CanonicalDomain, domains).([]string)
|
||||
return domains, nil
|
||||
}
|
||||
|
||||
func isDomainAlreadyChecked(domainToCheck string, existentDomains []string) bool {
|
||||
for _, certDomains := range existentDomains {
|
||||
for _, certDomain := range strings.Split(certDomains, ",") {
|
||||
// Use regex to test for provided existentDomains that might have been added into TLSConfig
|
||||
selector := "^" + strings.Replace(certDomain, "*.", "[^\\.]*\\.", -1) + "$"
|
||||
domainCheck, err := regexp.MatchString(selector, domainToCheck)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to compare %q and %q in ACME provider : %s", domainToCheck, certDomain, err)
|
||||
continue
|
||||
}
|
||||
if domainCheck {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
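isDomainAlreadyChecked above turns a wildcard entry such as *.acme.wtf into the pattern ^[^\.]*\.acme\.wtf$, so a wildcard covers exactly one extra label and never the root domain. A small standalone sketch of that matching, with hypothetical domains:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same transformation as isDomainAlreadyChecked above: "*.acme.wtf"
// becomes the selector ^[^\.]*\.acme\.wtf$.
func matchesCert(domainToCheck, certDomain string) bool {
	selector := "^" + strings.Replace(certDomain, "*.", "[^\\.]*\\.", -1) + "$"
	ok, err := regexp.MatchString(selector, domainToCheck)
	return err == nil && ok
}

func main() {
	// Hypothetical domains, for illustration only.
	fmt.Println(matchesCert("foo.acme.wtf", "*.acme.wtf")) // true: one label deep
	fmt.Println(matchesCert("acme.wtf", "*.acme.wtf"))     // false: the root domain is not covered
	fmt.Println(matchesCert("a.b.acme.wtf", "*.acme.wtf")) // false: the wildcard covers a single label only
	fmt.Println(matchesCert("acme.wtf", "acme.wtf"))       // true: exact match
}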
// deleteUnnecessaryDomains deletes from the configuration:
|
||||
// - Duplicated domains
|
||||
// - Domains which are checked by a wildcard domain
|
||||
func (p *Provider) deleteUnnecessaryDomains() {
|
||||
var newDomains []types.Domain
|
||||
|
||||
for idxDomainToCheck, domainToCheck := range p.Domains {
|
||||
keepDomain := true
|
||||
|
||||
for idxDomain, domain := range p.Domains {
|
||||
if idxDomainToCheck == idxDomain {
|
||||
continue
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(domain, domainToCheck) {
|
||||
if idxDomainToCheck > idxDomain {
|
||||
log.Warnf("The domain %v is duplicated in the configuration but will be process by ACME provider only once.", domainToCheck)
|
||||
keepDomain = false
|
||||
}
|
||||
break
|
||||
} else if strings.HasPrefix(domain.Main, "*") && domain.SANs == nil {
|
||||
|
||||
// Check if domains can be validated by the wildcard domain
|
||||
var newDomainsToCheck []string
|
||||
for _, domainProcessed := range domainToCheck.ToStrArray() {
|
||||
if isDomainAlreadyChecked(domainProcessed, domain.ToStrArray()) {
|
||||
log.Warnf("Domain %q will not be processed by ACME provider because it is validated by the wildcard %q", domainProcessed, domain.Main)
|
||||
continue
|
||||
}
|
||||
newDomainsToCheck = append(newDomainsToCheck, domainProcessed)
|
||||
}
|
||||
|
||||
// Delete the domain if both Main and SANs can be validated by the wildcard domain
|
||||
// otherwise keep the unchecked values
|
||||
if newDomainsToCheck == nil {
|
||||
keepDomain = false
|
||||
break
|
||||
}
|
||||
domainToCheck.Set(newDomainsToCheck)
|
||||
}
|
||||
}
|
||||
|
||||
if keepDomain {
|
||||
newDomains = append(newDomains, domainToCheck)
|
||||
}
|
||||
}
|
||||
|
||||
p.Domains = newDomains
|
||||
}
|
||||
|
|
342
provider/acme/provider_test.go
Normal file
|
@ -0,0 +1,342 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"testing"
|
||||
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetUncheckedCertificates(t *testing.T) {
|
||||
wildcardMap := make(map[string]*tls.Certificate)
|
||||
wildcardMap["*.traefik.wtf"] = &tls.Certificate{}
|
||||
|
||||
wildcardSafe := &safe.Safe{}
|
||||
wildcardSafe.Set(wildcardMap)
|
||||
|
||||
domainMap := make(map[string]*tls.Certificate)
|
||||
domainMap["traefik.wtf"] = &tls.Certificate{}
|
||||
|
||||
domainSafe := &safe.Safe{}
|
||||
domainSafe.Set(domainMap)
|
||||
|
||||
testCases := []struct {
|
||||
desc string
|
||||
dynamicCerts *safe.Safe
|
||||
staticCerts map[string]*tls.Certificate
|
||||
acmeCertificates []*Certificate
|
||||
domains []string
|
||||
expectedDomains []string
|
||||
}{
|
||||
{
|
||||
desc: "wildcard to generate",
|
||||
domains: []string{"*.traefik.wtf"},
|
||||
expectedDomains: []string{"*.traefik.wtf"},
|
||||
},
|
||||
{
|
||||
desc: "wildcard already exists in dynamic certificates",
|
||||
domains: []string{"*.traefik.wtf"},
|
||||
dynamicCerts: wildcardSafe,
|
||||
expectedDomains: nil,
|
||||
},
|
||||
{
|
||||
desc: "wildcard already exists in static certificates",
|
||||
domains: []string{"*.traefik.wtf"},
|
||||
staticCerts: wildcardMap,
|
||||
expectedDomains: nil,
|
||||
},
|
||||
{
|
||||
desc: "wildcard already exists in ACME certificates",
|
||||
domains: []string{"*.traefik.wtf"},
|
||||
acmeCertificates: []*Certificate{
|
||||
{
|
||||
Domain: types.Domain{Main: "*.traefik.wtf"},
|
||||
},
|
||||
},
|
||||
expectedDomains: nil,
|
||||
},
|
||||
{
|
||||
desc: "domain CN and SANs to generate",
|
||||
domains: []string{"traefik.wtf", "foo.traefik.wtf"},
|
||||
expectedDomains: []string{"traefik.wtf", "foo.traefik.wtf"},
|
||||
},
|
||||
{
|
||||
desc: "domain CN already exists in dynamic certificates and SANs to generate",
|
||||
domains: []string{"traefik.wtf", "foo.traefik.wtf"},
|
||||
dynamicCerts: domainSafe,
|
||||
expectedDomains: []string{"foo.traefik.wtf"},
|
||||
},
|
||||
{
|
||||
desc: "domain CN already exists in static certificates and SANs to generate",
|
||||
domains: []string{"traefik.wtf", "foo.traefik.wtf"},
|
||||
staticCerts: domainMap,
|
||||
expectedDomains: []string{"foo.traefik.wtf"},
|
||||
},
|
||||
{
|
||||
desc: "domain CN already exists in ACME certificates and SANs to generate",
|
||||
domains: []string{"traefik.wtf", "foo.traefik.wtf"},
|
||||
acmeCertificates: []*Certificate{
|
||||
{
|
||||
Domain: types.Domain{Main: "traefik.wtf"},
|
||||
},
|
||||
},
|
||||
expectedDomains: []string{"foo.traefik.wtf"},
|
||||
},
|
||||
{
|
||||
desc: "domain already exists in dynamic certificates",
|
||||
domains: []string{"traefik.wtf"},
|
||||
dynamicCerts: domainSafe,
|
||||
expectedDomains: nil,
|
||||
},
|
||||
{
|
||||
desc: "domain already exists in static certificates",
|
||||
domains: []string{"traefik.wtf"},
|
||||
staticCerts: domainMap,
|
||||
expectedDomains: nil,
|
||||
},
|
||||
{
|
||||
desc: "domain already exists in ACME certificates",
|
||||
domains: []string{"traefik.wtf"},
|
||||
acmeCertificates: []*Certificate{
|
||||
{
|
||||
Domain: types.Domain{Main: "traefik.wtf"},
|
||||
},
|
||||
},
|
||||
expectedDomains: nil,
|
||||
},
|
||||
{
|
||||
desc: "domain matched by wildcard in dynamic certificates",
|
||||
domains: []string{"who.traefik.wtf", "foo.traefik.wtf"},
|
||||
dynamicCerts: wildcardSafe,
|
||||
expectedDomains: nil,
|
||||
},
|
||||
{
|
||||
desc: "domain matched by wildcard in static certificates",
|
||||
domains: []string{"who.traefik.wtf", "foo.traefik.wtf"},
|
||||
staticCerts: wildcardMap,
|
||||
expectedDomains: nil,
|
||||
},
|
||||
{
|
||||
desc: "domain matched by wildcard in ACME certificates",
|
||||
domains: []string{"who.traefik.wtf", "foo.traefik.wtf"},
|
||||
acmeCertificates: []*Certificate{
|
||||
{
|
||||
Domain: types.Domain{Main: "*.traefik.wtf"},
|
||||
},
|
||||
},
|
||||
expectedDomains: nil,
|
||||
},
|
||||
{
|
||||
desc: "root domain with wildcard in ACME certificates",
|
||||
domains: []string{"traefik.wtf", "foo.traefik.wtf"},
|
||||
acmeCertificates: []*Certificate{
|
||||
{
|
||||
Domain: types.Domain{Main: "*.traefik.wtf"},
|
||||
},
|
||||
},
|
||||
expectedDomains: []string{"traefik.wtf"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
acmeProvider := Provider{
|
||||
dynamicCerts: test.dynamicCerts,
|
||||
staticCerts: test.staticCerts,
|
||||
certificates: test.acmeCertificates,
|
||||
}
|
||||
|
||||
domains := acmeProvider.getUncheckedDomains(test.domains, false)
|
||||
assert.Equal(t, len(test.expectedDomains), len(domains), "Unexpected domains.")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetValidDomain(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
domains types.Domain
|
||||
wildcardAllowed bool
|
||||
dnsChallenge *DNSChallenge
|
||||
expectedErr string
|
||||
expectedDomains []string
|
||||
}{
|
||||
{
|
||||
desc: "valid wildcard",
|
||||
domains: types.Domain{Main: "*.traefik.wtf"},
|
||||
dnsChallenge: &DNSChallenge{},
|
||||
wildcardAllowed: true,
|
||||
expectedErr: "",
|
||||
expectedDomains: []string{"*.traefik.wtf"},
|
||||
},
|
||||
{
|
||||
desc: "no wildcard",
|
||||
domains: types.Domain{Main: "traefik.wtf", SANs: []string{"foo.traefik.wtf"}},
|
||||
dnsChallenge: &DNSChallenge{},
|
||||
expectedErr: "",
|
||||
wildcardAllowed: true,
|
||||
expectedDomains: []string{"traefik.wtf", "foo.traefik.wtf"},
|
||||
},
|
||||
{
|
||||
desc: "unauthorized wildcard",
|
||||
domains: types.Domain{Main: "*.traefik.wtf"},
|
||||
dnsChallenge: &DNSChallenge{},
|
||||
wildcardAllowed: false,
|
||||
expectedErr: "unable to generate a wildcard certificate in ACME provider for domain \"*.traefik.wtf\" from a 'Host' rule",
|
||||
expectedDomains: nil,
|
||||
},
|
||||
{
|
||||
desc: "no domain",
|
||||
domains: types.Domain{},
|
||||
dnsChallenge: nil,
|
||||
wildcardAllowed: true,
|
||||
expectedErr: "unable to generate a certificate in ACME provider when no domain is given",
|
||||
expectedDomains: nil,
|
||||
},
|
||||
{
|
||||
desc: "no DNSChallenge",
|
||||
domains: types.Domain{Main: "*.traefik.wtf", SANs: []string{"foo.traefik.wtf"}},
|
||||
dnsChallenge: nil,
|
||||
wildcardAllowed: true,
|
||||
expectedErr: "unable to generate a wildcard certificate in ACME provider for domain \"*.traefik.wtf,foo.traefik.wtf\" : ACME needs a DNSChallenge",
|
||||
expectedDomains: nil,
|
||||
},
|
||||
{
|
||||
desc: "unexpected SANs",
|
||||
domains: types.Domain{Main: "*.traefik.wtf", SANs: []string{"foo.traefik.wtf"}},
|
||||
dnsChallenge: &DNSChallenge{},
|
||||
wildcardAllowed: true,
|
||||
expectedErr: "unable to generate a wildcard certificate in ACME provider for domain \"*.traefik.wtf,foo.traefik.wtf\" : SANs are not allowed",
|
||||
expectedDomains: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
acmeProvider := Provider{Configuration: &Configuration{DNSChallenge: test.dnsChallenge}}
|
||||
|
||||
domains, err := acmeProvider.getValidDomains(test.domains, test.wildcardAllowed)
|
||||
|
||||
if len(test.expectedErr) > 0 {
|
||||
assert.EqualError(t, err, test.expectedErr, "Unexpected error.")
|
||||
} else {
|
||||
assert.Equal(t, len(test.expectedDomains), len(domains), "Unexpected domains.")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteUnnecessaryDomains(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
domains []types.Domain
|
||||
expectedDomains []types.Domain
|
||||
}{
|
||||
{
|
||||
desc: "no domain to delete",
|
||||
domains: []types.Domain{
|
||||
{
|
||||
Main: "acme.wtf",
|
||||
SANs: []string{"traefik.acme.wtf", "foo.bar"},
|
||||
},
|
||||
{
|
||||
Main: "*.foo.acme.wtf",
|
||||
},
|
||||
{
|
||||
Main: "acme.wtf",
|
||||
SANs: []string{"traefik.acme.wtf", "bar.foo"},
|
||||
},
|
||||
},
|
||||
expectedDomains: []types.Domain{
|
||||
{
|
||||
Main: "acme.wtf",
|
||||
SANs: []string{"traefik.acme.wtf", "foo.bar"},
|
||||
},
|
||||
{
|
||||
Main: "*.foo.acme.wtf",
|
||||
},
|
||||
{
|
||||
Main: "acme.wtf",
|
||||
SANs: []string{"traefik.acme.wtf", "bar.foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "2 domains with same values",
|
||||
domains: []types.Domain{
|
||||
{
|
||||
Main: "acme.wtf",
|
||||
SANs: []string{"traefik.acme.wtf", "foo.bar"},
|
||||
},
|
||||
{
|
||||
Main: "acme.wtf",
|
||||
SANs: []string{"traefik.acme.wtf", "foo.bar"},
|
||||
},
|
||||
},
|
||||
expectedDomains: []types.Domain{
|
||||
{
|
||||
Main: "acme.wtf",
|
||||
SANs: []string{"traefik.acme.wtf", "foo.bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "domain totally checked by wildcard",
|
||||
domains: []types.Domain{
|
||||
{
|
||||
Main: "who.acme.wtf",
|
||||
SANs: []string{"traefik.acme.wtf", "bar.acme.wtf"},
|
||||
},
|
||||
{
|
||||
Main: "*.acme.wtf",
|
||||
},
|
||||
},
|
||||
expectedDomains: []types.Domain{
|
||||
{
|
||||
Main: "*.acme.wtf",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "domain partially checked by wildcard",
|
||||
domains: []types.Domain{
|
||||
{
|
||||
Main: "traefik.acme.wtf",
|
||||
SANs: []string{"acme.wtf", "foo.bar"},
|
||||
},
|
||||
{
|
||||
Main: "*.acme.wtf",
|
||||
},
|
||||
},
|
||||
expectedDomains: []types.Domain{
|
||||
{
|
||||
Main: "acme.wtf",
|
||||
SANs: []string{"foo.bar"},
|
||||
},
|
||||
{
|
||||
Main: "*.acme.wtf",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
acmeProvider := Provider{Configuration: &Configuration{Domains: test.domains}}
|
||||
|
||||
acmeProvider.deleteUnnecessaryDomains()
|
||||
assert.Equal(t, test.expectedDomains, acmeProvider.Domains, "unexpected domain")
|
||||
})
|
||||
}
|
||||
}
|
90
types/domain_test.go
Normal file
|
@ -0,0 +1,90 @@
|
|||
package types
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDomain_ToStrArray(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
domain Domain
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
desc: "with Main and SANs",
|
||||
domain: Domain{
|
||||
Main: "foo.com",
|
||||
SANs: []string{"bar.foo.com", "bir.foo.com"},
|
||||
},
|
||||
expected: []string{"foo.com", "bar.foo.com", "bir.foo.com"},
|
||||
},
|
||||
{
|
||||
desc: "without SANs",
|
||||
domain: Domain{
|
||||
Main: "foo.com",
|
||||
},
|
||||
expected: []string{"foo.com"},
|
||||
},
|
||||
{
|
||||
desc: "without Main",
|
||||
domain: Domain{
|
||||
SANs: []string{"bar.foo.com", "bir.foo.com"},
|
||||
},
|
||||
expected: []string{"bar.foo.com", "bir.foo.com"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
domains := test.domain.ToStrArray()
|
||||
assert.EqualValues(t, test.expected, domains)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDomain_Set(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
rawDomains []string
|
||||
expected Domain
|
||||
}{
|
||||
{
|
||||
desc: "with 3 domains",
|
||||
rawDomains: []string{"foo.com", "bar.foo.com", "bir.foo.com"},
|
||||
expected: Domain{
|
||||
Main: "foo.com",
|
||||
SANs: []string{"bar.foo.com", "bir.foo.com"},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "with 1 domain",
|
||||
rawDomains: []string{"foo.com"},
|
||||
expected: Domain{
|
||||
Main: "foo.com",
|
||||
SANs: []string{},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "",
|
||||
rawDomains: nil,
|
||||
expected: Domain{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
domain := Domain{}
|
||||
domain.Set(test.rawDomains)
|
||||
|
||||
assert.Equal(t, test.expected, domain)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -11,6 +11,23 @@ type Domain struct {
|
|||
SANs []string
|
||||
}
|
||||
|
||||
// ToStrArray converts a domain into an array of strings
|
||||
func (d *Domain) ToStrArray() []string {
|
||||
var domains []string
|
||||
if len(d.Main) > 0 {
|
||||
domains = []string{d.Main}
|
||||
}
|
||||
return append(domains, d.SANs...)
|
||||
}
|
||||
|
||||
// Set sets a domain from an array of strings
|
||||
func (d *Domain) Set(domains []string) {
|
||||
if len(domains) > 0 {
|
||||
d.Main = domains[0]
|
||||
d.SANs = domains[1:]
|
||||
}
|
||||
}
|
||||
|
||||
// Domains parse []Domain
|
||||
type Domains []Domain
|
||||
|
||||
|
|
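ToStrArray and Set above are simple converters between types.Domain and a flat []string (Main first, then SANs). A minimal usage sketch, self-contained with a local copy of the helpers; the domain names are hypothetical:

package main

import "fmt"

// Local copy of the Domain helpers above, so the sketch runs on its own.
type Domain struct {
	Main string
	SANs []string
}

func (d *Domain) ToStrArray() []string {
	var domains []string
	if len(d.Main) > 0 {
		domains = []string{d.Main}
	}
	return append(domains, d.SANs...)
}

func (d *Domain) Set(domains []string) {
	if len(domains) > 0 {
		d.Main = domains[0]
		d.SANs = domains[1:]
	}
}

func main() {
	// Hypothetical domains, for illustration only.
	d := Domain{}
	d.Set([]string{"foo.com", "bar.foo.com"})
	fmt.Println(d.Main, d.SANs) // foo.com [bar.foo.com]
	fmt.Println(d.ToStrArray()) // [foo.com bar.foo.com]
}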
13
vendor/github.com/xenolf/lego/acmev2/challenges.go
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
package acme
|
||||
|
||||
// Challenge is a string that identifies a particular type and version of ACME challenge.
|
||||
type Challenge string
|
||||
|
||||
const (
|
||||
// HTTP01 is the "http-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http
|
||||
// Note: HTTP01ChallengePath returns the URL path to fulfill this challenge
|
||||
HTTP01 = Challenge("http-01")
|
||||
// DNS01 is the "dns-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#dns
|
||||
// Note: DNS01Record returns a DNS record which will fulfill this challenge
|
||||
DNS01 = Challenge("dns-01")
|
||||
)
|
801
vendor/github.com/xenolf/lego/acmev2/client.go
generated
vendored
Normal file
|
@ -0,0 +1,801 @@
|
|||
// Package acme implements the ACME protocol for Let's Encrypt and other conforming providers.
|
||||
package acme
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// Logger is an optional custom logger.
|
||||
Logger *log.Logger
|
||||
)
|
||||
|
||||
const (
|
||||
// maxBodySize is the maximum size of body that we will read.
|
||||
maxBodySize = 1024 * 1024
|
||||
|
||||
// overallRequestLimit is the overall number of request per second limited on the
|
||||
// “new-reg”, “new-authz” and “new-cert” endpoints. From the documentation the
|
||||
// limitation is 20 requests per second, but using 20 as the value doesn't work, whereas 18 does
|
||||
overallRequestLimit = 18
|
||||
)
|
||||
|
||||
// logf writes a log entry. It uses Logger if not
|
||||
// nil, otherwise it uses the default log.Logger.
|
||||
func logf(format string, args ...interface{}) {
|
||||
if Logger != nil {
|
||||
Logger.Printf(format, args...)
|
||||
} else {
|
||||
log.Printf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// User interface is to be implemented by users of this library.
|
||||
// It is used by the client type to get user specific information.
|
||||
type User interface {
|
||||
GetEmail() string
|
||||
GetRegistration() *RegistrationResource
|
||||
GetPrivateKey() crypto.PrivateKey
|
||||
}
|
||||
|
||||
// Interface for all challenge solvers to implement.
|
||||
type solver interface {
|
||||
Solve(challenge challenge, domain string) error
|
||||
}
|
||||
|
||||
type validateFunc func(j *jws, domain, uri string, chlng challenge) error
|
||||
|
||||
// Client is the user-friendly way to ACME
|
||||
type Client struct {
|
||||
directory directory
|
||||
user User
|
||||
jws *jws
|
||||
keyType KeyType
|
||||
solvers map[Challenge]solver
|
||||
}
|
||||
|
||||
// NewClient creates a new ACME client on behalf of the user. The client will depend on
|
||||
// the ACME directory located at caDirURL for the rest of its actions. A private
|
||||
// key of type keyType (see KeyType constants) will be generated when requesting a new
|
||||
// certificate if one isn't provided.
|
||||
func NewClient(caDirURL string, user User, keyType KeyType) (*Client, error) {
|
||||
privKey := user.GetPrivateKey()
|
||||
if privKey == nil {
|
||||
return nil, errors.New("private key was nil")
|
||||
}
|
||||
|
||||
var dir directory
|
||||
if _, err := getJSON(caDirURL, &dir); err != nil {
|
||||
return nil, fmt.Errorf("get directory at '%s': %v", caDirURL, err)
|
||||
}
|
||||
|
||||
if dir.NewAccountURL == "" {
|
||||
return nil, errors.New("directory missing new registration URL")
|
||||
}
|
||||
if dir.NewOrderURL == "" {
|
||||
return nil, errors.New("directory missing new order URL")
|
||||
}
|
||||
/*if dir.RevokeCertURL == "" {
|
||||
return nil, errors.New("directory missing revoke certificate URL")
|
||||
}*/
|
||||
|
||||
jws := &jws{privKey: privKey, getNonceURL: dir.NewNonceURL}
|
||||
if reg := user.GetRegistration(); reg != nil {
|
||||
jws.kid = reg.URI
|
||||
}
|
||||
|
||||
// REVIEW: best possibility?
|
||||
// Add all available solvers with the right index as per ACME
|
||||
// spec to this map. Otherwise they won`t be found.
|
||||
solvers := make(map[Challenge]solver)
|
||||
solvers[HTTP01] = &httpChallenge{jws: jws, validate: validate, provider: &HTTPProviderServer{}}
|
||||
|
||||
return &Client{directory: dir, user: user, jws: jws, keyType: keyType, solvers: solvers}, nil
|
||||
}
|
||||
|
||||
// SetChallengeProvider specifies a custom provider p that can solve the given challenge type.
|
||||
func (c *Client) SetChallengeProvider(challenge Challenge, p ChallengeProvider) error {
|
||||
switch challenge {
|
||||
case HTTP01:
|
||||
c.solvers[challenge] = &httpChallenge{jws: c.jws, validate: validate, provider: p}
|
||||
case DNS01:
|
||||
c.solvers[challenge] = &dnsChallenge{jws: c.jws, validate: validate, provider: p}
|
||||
default:
|
||||
return fmt.Errorf("Unknown challenge %v", challenge)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetHTTPAddress specifies a custom interface:port to be used for HTTP based challenges.
|
||||
// If this option is not used, the default port 80 and all interfaces will be used.
|
||||
// To only specify a port and no interface use the ":port" notation.
|
||||
//
|
||||
// NOTE: This REPLACES any custom HTTP provider previously set by calling
|
||||
// c.SetChallengeProvider with the default HTTP challenge provider.
|
||||
func (c *Client) SetHTTPAddress(iface string) error {
|
||||
host, port, err := net.SplitHostPort(iface)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if chlng, ok := c.solvers[HTTP01]; ok {
|
||||
chlng.(*httpChallenge).provider = NewHTTPProviderServer(host, port)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExcludeChallenges explicitly removes challenges from the pool for solving.
|
||||
func (c *Client) ExcludeChallenges(challenges []Challenge) {
|
||||
// Loop through all challenges and delete the requested one if found.
|
||||
for _, challenge := range challenges {
|
||||
delete(c.solvers, challenge)
|
||||
}
|
||||
}
|
||||
|
||||
// GetToSURL returns the current ToS URL from the Directory
|
||||
func (c *Client) GetToSURL() string {
|
||||
return c.directory.Meta.TermsOfService
|
||||
}
|
||||
|
||||
// Register the current account to the ACME server.
|
||||
func (c *Client) Register(tosAgreed bool) (*RegistrationResource, error) {
|
||||
if c == nil || c.user == nil {
|
||||
return nil, errors.New("acme: cannot register a nil client or user")
|
||||
}
|
||||
logf("[INFO] acme: Registering account for %s", c.user.GetEmail())
|
||||
|
||||
accMsg := accountMessage{}
|
||||
if c.user.GetEmail() != "" {
|
||||
accMsg.Contact = []string{"mailto:" + c.user.GetEmail()}
|
||||
} else {
|
||||
accMsg.Contact = []string{}
|
||||
}
|
||||
accMsg.TermsOfServiceAgreed = tosAgreed
|
||||
|
||||
var serverReg accountMessage
|
||||
hdr, err := postJSON(c.jws, c.directory.NewAccountURL, accMsg, &serverReg)
|
||||
if err != nil {
|
||||
remoteErr, ok := err.(RemoteError)
|
||||
if ok && remoteErr.StatusCode == 409 {
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
reg := &RegistrationResource{
|
||||
URI: hdr.Get("Location"),
|
||||
Body: serverReg,
|
||||
}
|
||||
c.jws.kid = reg.URI
|
||||
|
||||
return reg, nil
|
||||
}
|
||||
|
||||
// ResolveAccountByKey will attempt to look up an account using the given account key
|
||||
// and return its registration resource.
|
||||
func (c *Client) ResolveAccountByKey() (*RegistrationResource, error) {
|
||||
logf("[INFO] acme: Trying to resolve account by key")
|
||||
|
||||
acc := accountMessage{OnlyReturnExisting: true}
|
||||
hdr, err := postJSON(c.jws, c.directory.NewAccountURL, acc, &acc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
accountLink := hdr.Get("Location")
|
||||
if accountLink == "" {
|
||||
return nil, errors.New("Server did not return the account link")
|
||||
}
|
||||
|
||||
var retAccount accountMessage
|
||||
c.jws.kid = accountLink
|
||||
hdr, err = postJSON(c.jws, accountLink, accountMessage{}, &retAccount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &RegistrationResource{URI: accountLink, Body: retAccount}, nil
|
||||
}
|
||||
|
||||
// DeleteRegistration deletes the client's user registration from the ACME
|
||||
// server.
|
||||
func (c *Client) DeleteRegistration() error {
|
||||
if c == nil || c.user == nil {
|
||||
return errors.New("acme: cannot unregister a nil client or user")
|
||||
}
|
||||
logf("[INFO] acme: Deleting account for %s", c.user.GetEmail())
|
||||
|
||||
accMsg := accountMessage{
|
||||
Status: "deactivated",
|
||||
}
|
||||
|
||||
_, err := postJSON(c.jws, c.user.GetRegistration().URI, accMsg, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// QueryRegistration runs a POST request on the client's registration and
|
||||
// returns the result.
|
||||
//
|
||||
// This is similar to the Register function, but acting on an existing
|
||||
// registration link and resource.
|
||||
func (c *Client) QueryRegistration() (*RegistrationResource, error) {
|
||||
if c == nil || c.user == nil {
|
||||
return nil, errors.New("acme: cannot query the registration of a nil client or user")
|
||||
}
|
||||
// Log the URL here instead of the email as the email may not be set
|
||||
logf("[INFO] acme: Querying account for %s", c.user.GetRegistration().URI)
|
||||
|
||||
accMsg := accountMessage{}
|
||||
|
||||
var serverReg accountMessage
|
||||
_, err := postJSON(c.jws, c.user.GetRegistration().URI, accMsg, &serverReg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reg := &RegistrationResource{Body: serverReg}
|
||||
|
||||
// Location: header is not returned so this needs to be populated off of
|
||||
// existing URI
|
||||
reg.URI = c.user.GetRegistration().URI
|
||||
|
||||
return reg, nil
|
||||
}
|
||||
|
||||
// ObtainCertificateForCSR tries to obtain a certificate matching the CSR passed into it.
|
||||
// The domains are inferred from the CommonName and SubjectAltNames, if any. The private key
|
||||
// for this CSR is not required.
|
||||
// If bundle is true, the []byte contains both the issuer certificate and
|
||||
// your issued certificate as a bundle.
|
||||
// This function will never return a partial certificate. If one domain in the list fails,
|
||||
// the whole certificate will fail.
|
||||
func (c *Client) ObtainCertificateForCSR(csr x509.CertificateRequest, bundle bool) (CertificateResource, map[string]error) {
|
||||
// figure out what domains it concerns
|
||||
// start with the common name
|
||||
domains := []string{csr.Subject.CommonName}
|
||||
|
||||
// loop over the SubjectAltName DNS names
|
||||
DNSNames:
|
||||
for _, sanName := range csr.DNSNames {
|
||||
for _, existingName := range domains {
|
||||
if existingName == sanName {
|
||||
// duplicate; skip this name
|
||||
continue DNSNames
|
||||
}
|
||||
}
|
||||
|
||||
// name is unique
|
||||
domains = append(domains, sanName)
|
||||
}
|
||||
|
||||
if bundle {
|
||||
logf("[INFO][%s] acme: Obtaining bundled SAN certificate given a CSR", strings.Join(domains, ", "))
|
||||
} else {
|
||||
logf("[INFO][%s] acme: Obtaining SAN certificate given a CSR", strings.Join(domains, ", "))
|
||||
}
|
||||
|
||||
order, err := c.createOrderForIdentifiers(domains)
|
||||
if err != nil {
|
||||
identErrors := make(map[string]error)
|
||||
for _, auth := range order.Identifiers {
|
||||
identErrors[auth.Value] = err
|
||||
}
|
||||
return CertificateResource{}, identErrors
|
||||
}
|
||||
authz, failures := c.getAuthzForOrder(order)
|
||||
// If any challenge fails - return. Do not generate partial SAN certificates.
|
||||
if len(failures) > 0 {
|
||||
/*for _, auth := range authz {
|
||||
c.disableAuthz(auth)
|
||||
}*/
|
||||
|
||||
return CertificateResource{}, failures
|
||||
}
|
||||
|
||||
errs := c.solveChallengeForAuthz(authz)
|
||||
// If any challenge fails - return. Do not generate partial SAN certificates.
|
||||
if len(errs) > 0 {
|
||||
return CertificateResource{}, errs
|
||||
}
|
||||
|
||||
logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", "))
|
||||
|
||||
cert, err := c.requestCertificateForCsr(order, bundle, csr.Raw, nil)
|
||||
if err != nil {
|
||||
for _, chln := range authz {
|
||||
failures[chln.Identifier.Value] = err
|
||||
}
|
||||
}
|
||||
|
||||
// Add the CSR to the certificate so that it can be used for renewals.
|
||||
cert.CSR = pemEncode(&csr)
|
||||
|
||||
return cert, failures
|
||||
}
|
||||
|
||||
// ObtainCertificate tries to obtain a single certificate using all domains passed into it.
|
||||
// The first domain in domains is used for the CommonName field of the certificate, all other
|
||||
// domains are added using the Subject Alternate Names extension. A new private key is generated
|
||||
// for every invocation of this function. If you do not want that you can supply your own private key
|
||||
// in the privKey parameter. If this parameter is non-nil it will be used instead of generating a new one.
|
||||
// If bundle is true, the []byte contains both the issuer certificate and
|
||||
// your issued certificate as a bundle.
|
||||
// This function will never return a partial certificate. If one domain in the list fails,
|
||||
// the whole certificate will fail.
|
||||
func (c *Client) ObtainCertificate(domains []string, bundle bool, privKey crypto.PrivateKey, mustStaple bool) (CertificateResource, map[string]error) {
|
||||
if bundle {
|
||||
logf("[INFO][%s] acme: Obtaining bundled SAN certificate", strings.Join(domains, ", "))
|
||||
} else {
|
||||
logf("[INFO][%s] acme: Obtaining SAN certificate", strings.Join(domains, ", "))
|
||||
}
|
||||
|
||||
order, err := c.createOrderForIdentifiers(domains)
|
||||
if err != nil {
|
||||
identErrors := make(map[string]error)
|
||||
for _, auth := range order.Identifiers {
|
||||
identErrors[auth.Value] = err
|
||||
}
|
||||
return CertificateResource{}, identErrors
|
||||
}
|
||||
authz, failures := c.getAuthzForOrder(order)
|
||||
// If any challenge fails - return. Do not generate partial SAN certificates.
|
||||
if len(failures) > 0 {
|
||||
/*for _, auth := range authz {
|
||||
c.disableAuthz(auth)
|
||||
}*/
|
||||
|
||||
return CertificateResource{}, failures
|
||||
}
|
||||
|
||||
errs := c.solveChallengeForAuthz(authz)
|
||||
// If any challenge fails - return. Do not generate partial SAN certificates.
|
||||
if len(errs) > 0 {
|
||||
return CertificateResource{}, errs
|
||||
}
|
||||
|
||||
logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", "))
|
||||
|
||||
cert, err := c.requestCertificateForOrder(order, bundle, privKey, mustStaple)
|
||||
if err != nil {
|
||||
for _, auth := range authz {
|
||||
failures[auth.Identifier.Value] = err
|
||||
}
|
||||
}
|
||||
|
||||
return cert, failures
|
||||
}
|
||||
|
||||
// RevokeCertificate takes a PEM encoded certificate or bundle and tries to revoke it at the CA.
|
||||
func (c *Client) RevokeCertificate(certificate []byte) error {
|
||||
certificates, err := parsePEMBundle(certificate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
x509Cert := certificates[0]
|
||||
if x509Cert.IsCA {
|
||||
return fmt.Errorf("Certificate bundle starts with a CA certificate")
|
||||
}
|
||||
|
||||
encodedCert := base64.URLEncoding.EncodeToString(x509Cert.Raw)
|
||||
|
||||
_, err = postJSON(c.jws, c.directory.RevokeCertURL, revokeCertMessage{Certificate: encodedCert}, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// RenewCertificate takes a CertificateResource and tries to renew the certificate.
|
||||
// If the renewal process succeeds, the new certificate will be returned in a new CertResource.
|
||||
// Please be aware that this function will return a new certificate in ANY case that is not an error.
|
||||
// If the server does not provide us with a new cert on a GET request to the CertURL
|
||||
// this function will start a new-cert flow where a new certificate gets generated.
|
||||
// If bundle is true, the []byte contains both the issuer certificate and
|
||||
// your issued certificate as a bundle.
|
||||
// For private key reuse the PrivateKey property of the passed in CertificateResource should be non-nil.
|
||||
func (c *Client) RenewCertificate(cert CertificateResource, bundle, mustStaple bool) (CertificateResource, error) {
|
||||
// Input certificate is PEM encoded. Decode it here as we may need the decoded
|
||||
// cert later on in the renewal process. The input may be a bundle or a single certificate.
|
||||
certificates, err := parsePEMBundle(cert.Certificate)
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
|
||||
x509Cert := certificates[0]
|
||||
if x509Cert.IsCA {
|
||||
return CertificateResource{}, fmt.Errorf("[%s] Certificate bundle starts with a CA certificate", cert.Domain)
|
||||
}
|
||||
|
||||
// This is just meant to be informal for the user.
|
||||
timeLeft := x509Cert.NotAfter.Sub(time.Now().UTC())
|
||||
logf("[INFO][%s] acme: Trying renewal with %d hours remaining", cert.Domain, int(timeLeft.Hours()))
|
||||
|
||||
// We always need to request a new certificate to renew.
|
||||
// Start by checking to see if the certificate was based off a CSR, and
|
||||
// use that if it's defined.
|
||||
if len(cert.CSR) > 0 {
|
||||
csr, err := pemDecodeTox509CSR(cert.CSR)
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
newCert, failures := c.ObtainCertificateForCSR(*csr, bundle)
|
||||
return newCert, failures[cert.Domain]
|
||||
}
|
||||
|
||||
var privKey crypto.PrivateKey
|
||||
if cert.PrivateKey != nil {
|
||||
privKey, err = parsePEMPrivateKey(cert.PrivateKey)
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
}
|
||||
|
||||
var domains []string
|
||||
var failures map[string]error
|
||||
// check for SAN certificate
|
||||
if len(x509Cert.DNSNames) > 1 {
|
||||
domains = append(domains, x509Cert.Subject.CommonName)
|
||||
for _, sanDomain := range x509Cert.DNSNames {
|
||||
if sanDomain == x509Cert.Subject.CommonName {
|
||||
continue
|
||||
}
|
||||
domains = append(domains, sanDomain)
|
||||
}
|
||||
} else {
|
||||
domains = append(domains, x509Cert.Subject.CommonName)
|
||||
}
|
||||
|
||||
newCert, failures := c.ObtainCertificate(domains, bundle, privKey, mustStaple)
|
||||
return newCert, failures[cert.Domain]
|
||||
}
|
||||
|
||||
func (c *Client) createOrderForIdentifiers(domains []string) (orderResource, error) {
|
||||
|
||||
var identifiers []identifier
|
||||
for _, domain := range domains {
|
||||
identifiers = append(identifiers, identifier{Type: "dns", Value: domain})
|
||||
}
|
||||
|
||||
order := orderMessage{
|
||||
Identifiers: identifiers,
|
||||
}
|
||||
|
||||
var response orderMessage
|
||||
hdr, err := postJSON(c.jws, c.directory.NewOrderURL, order, &response)
|
||||
if err != nil {
|
||||
return orderResource{}, err
|
||||
}
|
||||
|
||||
orderRes := orderResource{
|
||||
URL: hdr.Get("Location"),
|
||||
orderMessage: response,
|
||||
}
|
||||
return orderRes, nil
|
||||
}
|
||||
|
||||
// Looks through the challenge combinations to find a solvable match.
|
||||
// Then solves the challenges in series and returns.
|
||||
func (c *Client) solveChallengeForAuthz(authorizations []authorization) map[string]error {
|
||||
// loop through the resources, basically through the domains.
|
||||
failures := make(map[string]error)
|
||||
for _, authz := range authorizations {
|
||||
if authz.Status == "valid" {
|
||||
// Boulder might recycle recent validated authz (see issue #267)
|
||||
logf("[INFO][%s] acme: Authorization already valid; skipping challenge", authz.Identifier.Value)
|
||||
continue
|
||||
}
|
||||
|
||||
// no solvers - no solving
|
||||
if i, solver := c.chooseSolver(authz, authz.Identifier.Value); solver != nil {
|
||||
err := solver.Solve(authz.Challenges[i], authz.Identifier.Value)
|
||||
if err != nil {
|
||||
//c.disableAuthz(authz.Identifier)
|
||||
failures[authz.Identifier.Value] = err
|
||||
}
|
||||
} else {
|
||||
//c.disableAuthz(authz)
|
||||
failures[authz.Identifier.Value] = fmt.Errorf("[%s] acme: Could not determine solvers", authz.Identifier.Value)
|
||||
}
|
||||
}
|
||||
|
||||
return failures
|
||||
}
|
||||
|
||||
// Checks all challenges from the server in order and returns the first matching solver.
|
||||
func (c *Client) chooseSolver(auth authorization, domain string) (int, solver) {
|
||||
for i, challenge := range auth.Challenges {
|
||||
if solver, ok := c.solvers[Challenge(challenge.Type)]; ok {
|
||||
return i, solver
|
||||
}
|
||||
logf("[INFO][%s] acme: Could not find solver for: %s", domain, challenge.Type)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Get the challenges needed to prove our identifier to the ACME server.
|
||||
func (c *Client) getAuthzForOrder(order orderResource) ([]authorization, map[string]error) {
|
||||
resc, errc := make(chan authorization), make(chan domainError)
|
||||
|
||||
delay := time.Second / overallRequestLimit
|
||||
|
||||
for _, authzURL := range order.Authorizations {
|
||||
time.Sleep(delay)
|
||||
|
||||
go func(authzURL string) {
|
||||
var authz authorization
|
||||
_, err := getJSON(authzURL, &authz)
|
||||
if err != nil {
|
||||
errc <- domainError{Domain: authz.Identifier.Value, Error: err}
|
||||
return
|
||||
}
|
||||
|
||||
resc <- authz
|
||||
}(authzURL)
|
||||
}
|
||||
|
||||
var responses []authorization
|
||||
failures := make(map[string]error)
|
||||
for i := 0; i < len(order.Authorizations); i++ {
|
||||
select {
|
||||
case res := <-resc:
|
||||
responses = append(responses, res)
|
||||
case err := <-errc:
|
||||
failures[err.Domain] = err.Error
|
||||
}
|
||||
}
|
||||
|
||||
logAuthz(order)
|
||||
|
||||
close(resc)
|
||||
close(errc)
|
||||
|
||||
return responses, failures
|
||||
}
|
||||
|
||||
func logAuthz(order orderResource) {
|
||||
for i, auth := range order.Authorizations {
|
||||
logf("[INFO][%s] AuthURL: %s", order.Identifiers[i].Value, auth)
|
||||
}
|
||||
}
|
||||
|
||||
// cleanAuthz loops through the passed in slice and disables any auths which are not "valid"
|
||||
func (c *Client) disableAuthz(authURL string) error {
|
||||
var disabledAuth authorization
|
||||
_, err := postJSON(c.jws, authURL, deactivateAuthMessage{Status: "deactivated"}, &disabledAuth)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Client) requestCertificateForOrder(order orderResource, bundle bool, privKey crypto.PrivateKey, mustStaple bool) (CertificateResource, error) {
|
||||
|
||||
var err error
|
||||
if privKey == nil {
|
||||
privKey, err = generatePrivateKey(c.keyType)
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// determine certificate name(s) based on the authorization resources
|
||||
commonName := order.Identifiers[0].Value
|
||||
var san []string
|
||||
for _, auth := range order.Identifiers {
|
||||
san = append(san, auth.Value)
|
||||
}
|
||||
|
||||
// TODO: should the CSR be customizable?
|
||||
csr, err := generateCsr(privKey, commonName, san, mustStaple)
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
|
||||
return c.requestCertificateForCsr(order, bundle, csr, pemEncode(privKey))
|
||||
}
|
||||
|
||||
func (c *Client) requestCertificateForCsr(order orderResource, bundle bool, csr []byte, privateKeyPem []byte) (CertificateResource, error) {
|
||||
commonName := order.Identifiers[0].Value
|
||||
|
||||
var authURLs []string
|
||||
for _, auth := range order.Identifiers[1:] {
|
||||
authURLs = append(authURLs, auth.Value)
|
||||
}
|
||||
|
||||
csrString := base64.RawURLEncoding.EncodeToString(csr)
|
||||
var retOrder orderMessage
|
||||
_, error := postJSON(c.jws, order.Finalize, csrMessage{Csr: csrString}, &retOrder)
|
||||
if error != nil {
|
||||
return CertificateResource{}, error
|
||||
}
|
||||
|
||||
if retOrder.Status == "invalid" {
|
||||
return CertificateResource{}, error
|
||||
}
|
||||
|
||||
certRes := CertificateResource{
|
||||
Domain: commonName,
|
||||
CertURL: retOrder.Certificate,
|
||||
PrivateKey: privateKeyPem,
|
||||
}
|
||||
|
||||
if retOrder.Status == "valid" {
|
||||
// if the certificate is available right away, short cut!
|
||||
ok, err := c.checkCertResponse(retOrder, &certRes, bundle)
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
|
||||
if ok {
|
||||
return certRes, nil
|
||||
}
|
||||
}
|
||||
|
||||
maxChecks := 1000
|
||||
for i := 0; i < maxChecks; i++ {
|
||||
_, err := getJSON(order.URL, &retOrder)
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
done, err := c.checkCertResponse(retOrder, &certRes, bundle)
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
if done {
|
||||
break
|
||||
}
|
||||
if i == maxChecks-1 {
|
||||
return CertificateResource{}, fmt.Errorf("polled for certificate %d times; giving up", i)
|
||||
}
|
||||
}
|
||||
|
||||
return certRes, nil
|
||||
}
|
||||
|
||||
// checkCertResponse checks to see if the certificate is ready and a link is contained in the
|
||||
// response. if so, loads it into certRes and returns true. If the cert
|
||||
// is not yet ready, it returns false. The certRes input
|
||||
// should already have the Domain (common name) field populated. If bundle is
|
||||
// true, the certificate will be bundled with the issuer's cert.
|
||||
func (c *Client) checkCertResponse(order orderMessage, certRes *CertificateResource, bundle bool) (bool, error) {
|
||||
|
||||
switch order.Status {
|
||||
case "valid":
|
||||
resp, err := httpGet(order.Certificate)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
cert, err := ioutil.ReadAll(limitReader(resp.Body, maxBodySize))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// The issuer certificate link is always supplied via an "up" link
|
||||
// in the response headers of a new certificate.
|
||||
links := parseLinks(resp.Header["Link"])
|
||||
if link, ok := links["up"]; ok {
|
||||
issuerCert, err := c.getIssuerCertificate(link)
|
||||
|
||||
if err != nil {
|
||||
// If we fail to acquire the issuer cert, return the issued certificate - do not fail.
|
||||
logf("[WARNING][%s] acme: Could not bundle issuer certificate: %v", certRes.Domain, err)
|
||||
} else {
|
||||
issuerCert = pemEncode(derCertificateBytes(issuerCert))
|
||||
|
||||
// If bundle is true, we want to return a certificate bundle.
|
||||
// To do this, we append the issuer cert to the issued cert.
|
||||
if bundle {
|
||||
cert = append(cert, issuerCert...)
|
||||
}
|
||||
|
||||
certRes.IssuerCertificate = issuerCert
|
||||
}
|
||||
}
|
||||
|
||||
certRes.Certificate = cert
|
||||
certRes.CertURL = order.Certificate
|
||||
certRes.CertStableURL = order.Certificate
|
||||
logf("[INFO][%s] Server responded with a certificate.", certRes.Domain)
|
||||
return true, nil
|
||||
|
||||
case "processing":
|
||||
return false, nil
|
||||
case "invalid":
|
||||
return false, errors.New("Order has invalid state: invalid")
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// getIssuerCertificate requests the issuer certificate
|
||||
func (c *Client) getIssuerCertificate(url string) ([]byte, error) {
|
||||
logf("[INFO] acme: Requesting issuer cert from %s", url)
|
||||
resp, err := httpGet(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, maxBodySize))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = x509.ParseCertificate(issuerBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return issuerBytes, err
|
||||
}
|
||||
|
||||
func parseLinks(links []string) map[string]string {
|
||||
aBrkt := regexp.MustCompile("[<>]")
|
||||
slver := regexp.MustCompile("(.+) *= *\"(.+)\"")
|
||||
linkMap := make(map[string]string)
|
||||
|
||||
for _, link := range links {
|
||||
|
||||
link = aBrkt.ReplaceAllString(link, "")
|
||||
parts := strings.Split(link, ";")
|
||||
|
||||
matches := slver.FindStringSubmatch(parts[1])
|
||||
if len(matches) > 0 {
|
||||
linkMap[matches[2]] = parts[0]
|
||||
}
|
||||
}
|
||||
|
||||
return linkMap
|
||||
}
|
||||
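The parseLinks helper above is what feeds the issuer-certificate lookup: the "up" relation in the HTTP Link header points at the issuer. A minimal standalone sketch of the same parsing, using a made-up header value:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// linkHeaderDemo mirrors parseLinks above: strip the angle brackets,
// split off the rel attribute, and key the URL by its rel value.
func linkHeaderDemo(links []string) map[string]string {
	aBrkt := regexp.MustCompile("[<>]")
	rel := regexp.MustCompile(`(.+) *= *"(.+)"`)
	linkMap := make(map[string]string)

	for _, link := range links {
		link = aBrkt.ReplaceAllString(link, "")
		parts := strings.Split(link, ";")
		if m := rel.FindStringSubmatch(parts[1]); len(m) > 0 {
			linkMap[m[2]] = parts[0]
		}
	}
	return linkMap
}

func main() {
	header := []string{`<https://acme.example/issuer-cert>;rel="up"`}
	fmt.Println(linkHeaderDemo(header)["up"]) // https://acme.example/issuer-cert
}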
|
||||
// validate makes the ACME server start validating a
|
||||
// challenge response, only returning once it is done.
|
||||
func validate(j *jws, domain, uri string, c challenge) error {
|
||||
var chlng challenge
|
||||
|
||||
hdr, err := postJSON(j, uri, c, &chlng)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// After the path is sent, the ACME server will access our server.
|
||||
// Repeatedly check the server for an updated status on our request.
|
||||
for {
|
||||
switch chlng.Status {
|
||||
case "valid":
|
||||
logf("[INFO][%s] The server validated our request", domain)
|
||||
return nil
|
||||
case "pending":
|
||||
break
|
||||
case "invalid":
|
||||
return handleChallengeError(chlng)
|
||||
default:
|
||||
return errors.New("The server returned an unexpected state")
|
||||
}
|
||||
|
||||
ra, err := strconv.Atoi(hdr.Get("Retry-After"))
|
||||
if err != nil {
|
||||
// The ACME server MUST return a Retry-After.
|
||||
// If it doesn't, we'll just poll hard.
|
||||
ra = 5
|
||||
}
|
||||
time.Sleep(time.Duration(ra) * time.Second)
|
||||
|
||||
hdr, err = getJSON(uri, &chlng)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
343
vendor/github.com/xenolf/lego/acmev2/crypto.go
generated
vendored
Normal file
|
@@ -0,0 +1,343 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"encoding/asn1"
|
||||
|
||||
"golang.org/x/crypto/ocsp"
|
||||
jose "gopkg.in/square/go-jose.v2"
|
||||
)
|
||||
|
||||
// KeyType represents the key algo as well as the key size or curve to use.
|
||||
type KeyType string
|
||||
type derCertificateBytes []byte
|
||||
|
||||
// Constants for all key types we support.
|
||||
const (
|
||||
EC256 = KeyType("P256")
|
||||
EC384 = KeyType("P384")
|
||||
RSA2048 = KeyType("2048")
|
||||
RSA4096 = KeyType("4096")
|
||||
RSA8192 = KeyType("8192")
|
||||
)
|
||||
|
||||
const (
|
||||
// OCSPGood means that the certificate is valid.
|
||||
OCSPGood = ocsp.Good
|
||||
// OCSPRevoked means that the certificate has been deliberately revoked.
|
||||
OCSPRevoked = ocsp.Revoked
|
||||
// OCSPUnknown means that the OCSP responder doesn't know about the certificate.
|
||||
OCSPUnknown = ocsp.Unknown
|
||||
// OCSPServerFailed means that the OCSP responder failed to process the request.
|
||||
OCSPServerFailed = ocsp.ServerFailed
|
||||
)
|
||||
|
||||
// Constants for OCSP must staple
|
||||
var (
|
||||
tlsFeatureExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24}
|
||||
ocspMustStapleFeature = []byte{0x30, 0x03, 0x02, 0x01, 0x05}
|
||||
)
|
||||
|
||||
// GetOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response,
|
||||
// the parsed response, and an error, if any. The returned []byte can be passed directly
|
||||
// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the
|
||||
// issued certificate, this function will try to get the issuer certificate from the
|
||||
// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return
|
||||
// values are nil, the OCSP status may be assumed OCSPUnknown.
|
||||
func GetOCSPForCert(bundle []byte) ([]byte, *ocsp.Response, error) {
|
||||
certificates, err := parsePEMBundle(bundle)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// We expect the certificate slice to be ordered downwards the chain.
|
||||
// SRV CRT -> CA. We need to pull the leaf and issuer certs out of it,
|
||||
// which should always be the first two certificates. If there's no
|
||||
// OCSP server listed in the leaf cert, there's nothing to do. And if
|
||||
// we have only one certificate so far, we need to get the issuer cert.
|
||||
issuedCert := certificates[0]
|
||||
if len(issuedCert.OCSPServer) == 0 {
|
||||
return nil, nil, errors.New("no OCSP server specified in cert")
|
||||
}
|
||||
if len(certificates) == 1 {
|
||||
// TODO: build fallback. If this fails, check the remaining array entries.
|
||||
if len(issuedCert.IssuingCertificateURL) == 0 {
|
||||
return nil, nil, errors.New("no issuing certificate URL")
|
||||
}
|
||||
|
||||
resp, err := httpGet(issuedCert.IssuingCertificateURL[0])
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
issuerCert, err := x509.ParseCertificate(issuerBytes)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Insert it into the slice on position 0
|
||||
// We want it ordered right SRV CRT -> CA
|
||||
certificates = append(certificates, issuerCert)
|
||||
}
|
||||
issuerCert := certificates[1]
|
||||
|
||||
// Finally kick off the OCSP request.
|
||||
ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
reader := bytes.NewReader(ocspReq)
|
||||
req, err := httpPost(issuedCert.OCSPServer[0], "application/ocsp-request", reader)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
ocspResBytes, err := ioutil.ReadAll(limitReader(req.Body, 1024*1024))
if err != nil {
return nil, nil, err
}
|
||||
ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return ocspResBytes, ocspRes, nil
|
||||
}
|
||||
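GetOCSPForCert is the exported entry point of this block; a rough usage sketch is stapling the returned raw response onto a tls.Certificate before serving it. The file paths below are placeholders, and the PEM bundle is assumed to contain the leaf (and ideally the issuer) certificate:

package main

import (
	"crypto/tls"
	"io/ioutil"
	"log"

	acme "github.com/xenolf/lego/acmev2"
)

func main() {
	certPEM, err := ioutil.ReadFile("example.com.crt") // hypothetical PEM bundle
	if err != nil {
		log.Fatal(err)
	}
	keyPEM, err := ioutil.ReadFile("example.com.key") // hypothetical key file
	if err != nil {
		log.Fatal(err)
	}

	cert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		log.Fatal(err)
	}

	// Staple the OCSP response if one can be fetched; serving without a
	// staple still works, so the failure is only logged.
	if ocspBytes, _, err := acme.GetOCSPForCert(certPEM); err == nil {
		cert.OCSPStaple = ocspBytes
	} else {
		log.Printf("no OCSP staple: %v", err)
	}

	cfg := &tls.Config{Certificates: []tls.Certificate{cert}}
	_ = cfg // hand cfg to an http.Server or tls.Listener
}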
|
||||
func getKeyAuthorization(token string, key interface{}) (string, error) {
|
||||
var publicKey crypto.PublicKey
|
||||
switch k := key.(type) {
|
||||
case *ecdsa.PrivateKey:
|
||||
publicKey = k.Public()
|
||||
case *rsa.PrivateKey:
|
||||
publicKey = k.Public()
|
||||
}
|
||||
|
||||
// Generate the Key Authorization for the challenge
|
||||
jwk := &jose.JSONWebKey{Key: publicKey}
|
||||
if jwk == nil {
|
||||
return "", errors.New("Could not generate JWK from key")
|
||||
}
|
||||
thumbBytes, err := jwk.Thumbprint(crypto.SHA256)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// unpad the base64URL
|
||||
keyThumb := base64.RawURLEncoding.EncodeToString(thumbBytes)
|
||||
|
||||
return token + "." + keyThumb, nil
|
||||
}
|
||||
|
||||
// parsePEMBundle parses a certificate bundle from top to bottom and returns
|
||||
// a slice of x509 certificates. This function will error if no certificates are found.
|
||||
func parsePEMBundle(bundle []byte) ([]*x509.Certificate, error) {
|
||||
var certificates []*x509.Certificate
|
||||
var certDERBlock *pem.Block
|
||||
|
||||
for {
|
||||
certDERBlock, bundle = pem.Decode(bundle)
|
||||
if certDERBlock == nil {
|
||||
break
|
||||
}
|
||||
|
||||
if certDERBlock.Type == "CERTIFICATE" {
|
||||
cert, err := x509.ParseCertificate(certDERBlock.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
certificates = append(certificates, cert)
|
||||
}
|
||||
}
|
||||
|
||||
if len(certificates) == 0 {
|
||||
return nil, errors.New("No certificates were found while parsing the bundle")
|
||||
}
|
||||
|
||||
return certificates, nil
|
||||
}
|
||||
|
||||
func parsePEMPrivateKey(key []byte) (crypto.PrivateKey, error) {
|
||||
keyBlock, _ := pem.Decode(key)
|
||||
|
||||
switch keyBlock.Type {
|
||||
case "RSA PRIVATE KEY":
|
||||
return x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
|
||||
case "EC PRIVATE KEY":
|
||||
return x509.ParseECPrivateKey(keyBlock.Bytes)
|
||||
default:
|
||||
return nil, errors.New("Unknown PEM header value")
|
||||
}
|
||||
}
|
||||
|
||||
func generatePrivateKey(keyType KeyType) (crypto.PrivateKey, error) {
|
||||
|
||||
switch keyType {
|
||||
case EC256:
|
||||
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
case EC384:
|
||||
return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
|
||||
case RSA2048:
|
||||
return rsa.GenerateKey(rand.Reader, 2048)
|
||||
case RSA4096:
|
||||
return rsa.GenerateKey(rand.Reader, 4096)
|
||||
case RSA8192:
|
||||
return rsa.GenerateKey(rand.Reader, 8192)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Invalid KeyType: %s", keyType)
|
||||
}
|
||||
|
||||
func generateCsr(privateKey crypto.PrivateKey, domain string, san []string, mustStaple bool) ([]byte, error) {
|
||||
template := x509.CertificateRequest{
|
||||
Subject: pkix.Name{
|
||||
CommonName: domain,
|
||||
},
|
||||
}
|
||||
|
||||
if len(san) > 0 {
|
||||
template.DNSNames = san
|
||||
}
|
||||
|
||||
if mustStaple {
|
||||
template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{
|
||||
Id: tlsFeatureExtensionOID,
|
||||
Value: ocspMustStapleFeature,
|
||||
})
|
||||
}
|
||||
|
||||
return x509.CreateCertificateRequest(rand.Reader, &template, privateKey)
|
||||
}
|
||||
|
||||
func pemEncode(data interface{}) []byte {
|
||||
var pemBlock *pem.Block
|
||||
switch key := data.(type) {
|
||||
case *ecdsa.PrivateKey:
|
||||
keyBytes, _ := x509.MarshalECPrivateKey(key)
|
||||
pemBlock = &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}
|
||||
case *rsa.PrivateKey:
|
||||
pemBlock = &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}
|
||||
break
|
||||
case *x509.CertificateRequest:
|
||||
pemBlock = &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: key.Raw}
|
||||
break
|
||||
case derCertificateBytes:
|
||||
pemBlock = &pem.Block{Type: "CERTIFICATE", Bytes: []byte(data.(derCertificateBytes))}
|
||||
}
|
||||
|
||||
return pem.EncodeToMemory(pemBlock)
|
||||
}
|
||||
|
||||
func pemDecode(data []byte) (*pem.Block, error) {
|
||||
pemBlock, _ := pem.Decode(data)
|
||||
if pemBlock == nil {
|
||||
return nil, fmt.Errorf("Pem decode did not yield a valid block. Is the certificate in the right format?")
|
||||
}
|
||||
|
||||
return pemBlock, nil
|
||||
}
|
||||
|
||||
func pemDecodeTox509(pem []byte) (*x509.Certificate, error) {
|
||||
pemBlock, err := pemDecode(pem)
|
||||
if pemBlock == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return x509.ParseCertificate(pemBlock.Bytes)
|
||||
}
|
||||
|
||||
func pemDecodeTox509CSR(pem []byte) (*x509.CertificateRequest, error) {
|
||||
pemBlock, err := pemDecode(pem)
|
||||
if pemBlock == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if pemBlock.Type != "CERTIFICATE REQUEST" {
|
||||
return nil, fmt.Errorf("PEM block is not a certificate request")
|
||||
}
|
||||
|
||||
return x509.ParseCertificateRequest(pemBlock.Bytes)
|
||||
}
|
||||
|
||||
// GetPEMCertExpiration returns the "NotAfter" date of a PEM encoded certificate.
|
||||
// The certificate has to be PEM encoded. Any other encodings like DER will fail.
|
||||
func GetPEMCertExpiration(cert []byte) (time.Time, error) {
|
||||
pemBlock, err := pemDecode(cert)
|
||||
if pemBlock == nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
|
||||
return getCertExpiration(pemBlock.Bytes)
|
||||
}
|
||||
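GetPEMCertExpiration is what renewal logic typically builds on. A small sketch; the certificate path and the 30-day threshold are arbitrary choices, not values mandated by the library:

package main

import (
	"fmt"
	"io/ioutil"
	"time"

	acme "github.com/xenolf/lego/acmev2"
)

func main() {
	pemBytes, err := ioutil.ReadFile("example.com.crt") // hypothetical path
	if err != nil {
		panic(err)
	}

	notAfter, err := acme.GetPEMCertExpiration(pemBytes)
	if err != nil {
		panic(err)
	}

	// Renew when less than 30 days of validity remain.
	if time.Until(notAfter) < 30*24*time.Hour {
		fmt.Println("certificate is due for renewal")
	}
}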
|
||||
// getCertExpiration returns the "NotAfter" date of a DER encoded certificate.
|
||||
func getCertExpiration(cert []byte) (time.Time, error) {
|
||||
pCert, err := x509.ParseCertificate(cert)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
|
||||
return pCert.NotAfter, nil
|
||||
}
|
||||
|
||||
func generatePemCert(privKey *rsa.PrivateKey, domain string) ([]byte, error) {
|
||||
derBytes, err := generateDerCert(privKey, time.Time{}, domain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}), nil
|
||||
}
|
||||
|
||||
func generateDerCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]byte, error) {
|
||||
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
|
||||
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if expiration.IsZero() {
|
||||
expiration = time.Now().Add(365 * 24 * time.Hour)
|
||||
}
|
||||
|
||||
template := x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
Subject: pkix.Name{
|
||||
CommonName: "ACME Challenge TEMP",
|
||||
},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: expiration,
|
||||
|
||||
KeyUsage: x509.KeyUsageKeyEncipherment,
|
||||
BasicConstraintsValid: true,
|
||||
DNSNames: []string{domain},
|
||||
}
|
||||
|
||||
return x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey)
|
||||
}
|
||||
|
||||
func limitReader(rd io.ReadCloser, numBytes int64) io.ReadCloser {
|
||||
return http.MaxBytesReader(nil, rd, numBytes)
|
||||
}
|
309
vendor/github.com/xenolf/lego/acmev2/dns_challenge.go
generated
vendored
Normal file
|
@@ -0,0 +1,309 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
type preCheckDNSFunc func(fqdn, value string) (bool, error)
|
||||
|
||||
var (
|
||||
// PreCheckDNS checks DNS propagation before notifying ACME that
|
||||
// the DNS challenge is ready.
|
||||
PreCheckDNS preCheckDNSFunc = checkDNSPropagation
|
||||
fqdnToZone = map[string]string{}
|
||||
)
|
||||
|
||||
const defaultResolvConf = "/etc/resolv.conf"
|
||||
|
||||
var defaultNameservers = []string{
|
||||
"google-public-dns-a.google.com:53",
|
||||
"google-public-dns-b.google.com:53",
|
||||
}
|
||||
|
||||
// RecursiveNameservers are used to pre-check DNS propagations
|
||||
var RecursiveNameservers = getNameservers(defaultResolvConf, defaultNameservers)
|
||||
|
||||
// DNSTimeout is used to override the default DNS timeout of 10 seconds.
|
||||
var DNSTimeout = 10 * time.Second
|
||||
|
||||
// getNameservers attempts to get systems nameservers before falling back to the defaults
|
||||
func getNameservers(path string, defaults []string) []string {
|
||||
config, err := dns.ClientConfigFromFile(path)
|
||||
if err != nil || len(config.Servers) == 0 {
|
||||
return defaults
|
||||
}
|
||||
|
||||
systemNameservers := []string{}
|
||||
for _, server := range config.Servers {
|
||||
// ensure all servers have a port number
|
||||
if _, _, err := net.SplitHostPort(server); err != nil {
|
||||
systemNameservers = append(systemNameservers, net.JoinHostPort(server, "53"))
|
||||
} else {
|
||||
systemNameservers = append(systemNameservers, server)
|
||||
}
|
||||
}
|
||||
return systemNameservers
|
||||
}
|
||||
|
||||
// DNS01Record returns a DNS record which will fulfill the `dns-01` challenge
|
||||
func DNS01Record(domain, keyAuth string) (fqdn string, value string, ttl int) {
|
||||
keyAuthShaBytes := sha256.Sum256([]byte(keyAuth))
|
||||
// base64URL encoding without padding
|
||||
value = base64.RawURLEncoding.EncodeToString(keyAuthShaBytes[:sha256.Size])
|
||||
ttl = 120
|
||||
fqdn = fmt.Sprintf("_acme-challenge.%s.", domain)
|
||||
return
|
||||
}
|
||||
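DNS01Record is exported, so external tooling can precompute the TXT record a dns-01 validation will look for. A sketch with a placeholder key authorization (a real one is "<token>.<account key thumbprint>"):

package main

import (
	"fmt"

	acme "github.com/xenolf/lego/acmev2"
)

func main() {
	fqdn, value, ttl := acme.DNS01Record("example.com", "token.thumbprint")
	fmt.Printf("%s %d IN TXT %q\n", fqdn, ttl, value)
	// prints: _acme-challenge.example.com. 120 IN TXT "<base64url sha256 of the key authorization>"
}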
|
||||
// dnsChallenge implements the dns-01 challenge according to ACME 7.5
|
||||
type dnsChallenge struct {
|
||||
jws *jws
|
||||
validate validateFunc
|
||||
provider ChallengeProvider
|
||||
}
|
||||
|
||||
func (s *dnsChallenge) Solve(chlng challenge, domain string) error {
|
||||
logf("[INFO][%s] acme: Trying to solve DNS-01", domain)
|
||||
|
||||
if s.provider == nil {
|
||||
return errors.New("No DNS Provider configured")
|
||||
}
|
||||
|
||||
// Generate the Key Authorization for the challenge
|
||||
keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.provider.Present(domain, chlng.Token, keyAuth)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error presenting token: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
err := s.provider.CleanUp(domain, chlng.Token, keyAuth)
|
||||
if err != nil {
|
||||
log.Printf("Error cleaning up %s: %v ", domain, err)
|
||||
}
|
||||
}()
|
||||
|
||||
fqdn, value, _ := DNS01Record(domain, keyAuth)
|
||||
|
||||
logf("[INFO][%s] Checking DNS record propagation using %+v", domain, RecursiveNameservers)
|
||||
|
||||
var timeout, interval time.Duration
|
||||
switch provider := s.provider.(type) {
|
||||
case ChallengeProviderTimeout:
|
||||
timeout, interval = provider.Timeout()
|
||||
default:
|
||||
timeout, interval = 60*time.Second, 2*time.Second
|
||||
}
|
||||
|
||||
err = WaitFor(timeout, interval, func() (bool, error) {
|
||||
return PreCheckDNS(fqdn, value)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.validate(s.jws, domain, chlng.URL, challenge{Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth})
|
||||
}
|
||||
|
||||
// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers.
|
||||
func checkDNSPropagation(fqdn, value string) (bool, error) {
|
||||
// Initial attempt to resolve at the recursive NS
|
||||
r, err := dnsQuery(fqdn, dns.TypeTXT, RecursiveNameservers, true)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if r.Rcode == dns.RcodeSuccess {
|
||||
// If we see a CNAME here then use the alias
|
||||
for _, rr := range r.Answer {
|
||||
if cn, ok := rr.(*dns.CNAME); ok {
|
||||
if cn.Hdr.Name == fqdn {
|
||||
fqdn = cn.Target
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
authoritativeNss, err := lookupNameservers(fqdn)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return checkAuthoritativeNss(fqdn, value, authoritativeNss)
|
||||
}
|
||||
|
||||
// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record.
|
||||
func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) {
|
||||
for _, ns := range nameservers {
|
||||
r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if r.Rcode != dns.RcodeSuccess {
|
||||
return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn)
|
||||
}
|
||||
|
||||
var found bool
|
||||
for _, rr := range r.Answer {
|
||||
if txt, ok := rr.(*dns.TXT); ok {
|
||||
if strings.Join(txt.Txt, "") == value {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return false, fmt.Errorf("NS %s did not return the expected TXT record", ns)
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// dnsQuery will query a nameserver, iterating through the supplied servers as it retries
|
||||
// The nameserver should include a port, to facilitate testing where we talk to a mock dns server.
|
||||
func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (in *dns.Msg, err error) {
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion(fqdn, rtype)
|
||||
m.SetEdns0(4096, false)
|
||||
|
||||
if !recursive {
|
||||
m.RecursionDesired = false
|
||||
}
|
||||
|
||||
// Will retry the request based on the number of servers (n+1)
|
||||
for i := 1; i <= len(nameservers)+1; i++ {
|
||||
ns := nameservers[i%len(nameservers)]
|
||||
udp := &dns.Client{Net: "udp", Timeout: DNSTimeout}
|
||||
in, _, err = udp.Exchange(m, ns)
|
||||
|
||||
if err == dns.ErrTruncated {
|
||||
tcp := &dns.Client{Net: "tcp", Timeout: DNSTimeout}
|
||||
// If the TCP request succeeds, the err will reset to nil
|
||||
in, _, err = tcp.Exchange(m, ns)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// lookupNameservers returns the authoritative nameservers for the given fqdn.
|
||||
func lookupNameservers(fqdn string) ([]string, error) {
|
||||
var authoritativeNss []string
|
||||
|
||||
zone, err := FindZoneByFqdn(fqdn, RecursiveNameservers)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not determine the zone: %v", err)
|
||||
}
|
||||
|
||||
r, err := dnsQuery(zone, dns.TypeNS, RecursiveNameservers, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, rr := range r.Answer {
|
||||
if ns, ok := rr.(*dns.NS); ok {
|
||||
authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns))
|
||||
}
|
||||
}
|
||||
|
||||
if len(authoritativeNss) > 0 {
|
||||
return authoritativeNss, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Could not determine authoritative nameservers")
|
||||
}
|
||||
|
||||
// FindZoneByFqdn determines the zone apex for the given fqdn by recursing up the
|
||||
// domain labels until the nameserver returns a SOA record in the answer section.
|
||||
func FindZoneByFqdn(fqdn string, nameservers []string) (string, error) {
|
||||
// Do we have it cached?
|
||||
if zone, ok := fqdnToZone[fqdn]; ok {
|
||||
return zone, nil
|
||||
}
|
||||
|
||||
labelIndexes := dns.Split(fqdn)
|
||||
for _, index := range labelIndexes {
|
||||
domain := fqdn[index:]
|
||||
|
||||
in, err := dnsQuery(domain, dns.TypeSOA, nameservers, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Any response code other than NOERROR and NXDOMAIN is treated as error
|
||||
if in.Rcode != dns.RcodeNameError && in.Rcode != dns.RcodeSuccess {
|
||||
return "", fmt.Errorf("Unexpected response code '%s' for %s",
|
||||
dns.RcodeToString[in.Rcode], domain)
|
||||
}
|
||||
|
||||
// Check if we got a SOA RR in the answer section
|
||||
if in.Rcode == dns.RcodeSuccess {
|
||||
|
||||
// CNAME records cannot/should not exist at the root of a zone.
|
||||
// So we skip a domain when a CNAME is found.
|
||||
if dnsMsgContainsCNAME(in) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, ans := range in.Answer {
|
||||
if soa, ok := ans.(*dns.SOA); ok {
|
||||
zone := soa.Hdr.Name
|
||||
fqdnToZone[fqdn] = zone
|
||||
return zone, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("Could not find the start of authority")
|
||||
}
|
||||
|
||||
// dnsMsgContainsCNAME checks for a CNAME answer in msg
|
||||
func dnsMsgContainsCNAME(msg *dns.Msg) bool {
|
||||
for _, ans := range msg.Answer {
|
||||
if _, ok := ans.(*dns.CNAME); ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ClearFqdnCache clears the cache of fqdn to zone mappings. Primarily used in testing.
|
||||
func ClearFqdnCache() {
|
||||
fqdnToZone = map[string]string{}
|
||||
}
|
||||
|
||||
// ToFqdn converts the name into a fqdn appending a trailing dot.
|
||||
func ToFqdn(name string) string {
|
||||
n := len(name)
|
||||
if n == 0 || name[n-1] == '.' {
|
||||
return name
|
||||
}
|
||||
return name + "."
|
||||
}
|
||||
|
||||
// UnFqdn converts the fqdn into a name removing the trailing dot.
|
||||
func UnFqdn(name string) string {
|
||||
n := len(name)
|
||||
if n != 0 && name[n-1] == '.' {
|
||||
return name[:n-1]
|
||||
}
|
||||
return name
|
||||
}
|
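FindZoneByFqdn, ToFqdn and UnFqdn are also useful on their own when wiring up a DNS provider: they tell you which zone the _acme-challenge record belongs to. A sketch that performs live SOA lookups against the configured recursive nameservers:

package main

import (
	"fmt"

	acme "github.com/xenolf/lego/acmev2"
)

func main() {
	fqdn := acme.ToFqdn("_acme-challenge.www.example.com")

	// Walks up the labels until a SOA record is returned in the answer section.
	zone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers)
	if err != nil {
		panic(err)
	}
	fmt.Println("create the TXT record in zone:", acme.UnFqdn(zone)) // example.com
}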
53
vendor/github.com/xenolf/lego/acmev2/dns_challenge_manual.go
generated
vendored
Normal file
|
@@ -0,0 +1,53 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
const (
|
||||
dnsTemplate = "%s %d IN TXT \"%s\""
|
||||
)
|
||||
|
||||
// DNSProviderManual is an implementation of the ChallengeProvider interface
|
||||
type DNSProviderManual struct{}
|
||||
|
||||
// NewDNSProviderManual returns a DNSProviderManual instance.
|
||||
func NewDNSProviderManual() (*DNSProviderManual, error) {
|
||||
return &DNSProviderManual{}, nil
|
||||
}
|
||||
|
||||
// Present prints instructions for manually creating the TXT record
|
||||
func (*DNSProviderManual) Present(domain, token, keyAuth string) error {
|
||||
fqdn, value, ttl := DNS01Record(domain, keyAuth)
|
||||
dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, value)
|
||||
|
||||
authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logf("[INFO] acme: Please create the following TXT record in your %s zone:", authZone)
|
||||
logf("[INFO] acme: %s", dnsRecord)
|
||||
logf("[INFO] acme: Press 'Enter' when you are done")
|
||||
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
_, _ = reader.ReadString('\n')
|
||||
return nil
|
||||
}
|
||||
|
||||
// CleanUp prints instructions for manually removing the TXT record
|
||||
func (*DNSProviderManual) CleanUp(domain, token, keyAuth string) error {
|
||||
fqdn, _, ttl := DNS01Record(domain, keyAuth)
|
||||
dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, "...")
|
||||
|
||||
authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logf("[INFO] acme: You can now remove this TXT record from your %s zone:", authZone)
|
||||
logf("[INFO] acme: %s", dnsRecord)
|
||||
return nil
|
||||
}
|
78
vendor/github.com/xenolf/lego/acmev2/error.go
generated
vendored
Normal file
|
@@ -0,0 +1,78 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
tosAgreementError = "Terms of service have changed"
|
||||
invalidNonceError = "urn:ietf:params:acme:error:badNonce"
|
||||
)
|
||||
|
||||
// RemoteError is the base type for all errors specific to the ACME protocol.
|
||||
type RemoteError struct {
|
||||
StatusCode int `json:"status,omitempty"`
|
||||
Type string `json:"type"`
|
||||
Detail string `json:"detail"`
|
||||
}
|
||||
|
||||
func (e RemoteError) Error() string {
|
||||
return fmt.Sprintf("acme: Error %d - %s - %s", e.StatusCode, e.Type, e.Detail)
|
||||
}
|
||||
|
||||
// TOSError represents the error which is returned if the user needs to
|
||||
// accept the TOS.
|
||||
// TODO: include the new TOS url if we can somehow obtain it.
|
||||
type TOSError struct {
|
||||
RemoteError
|
||||
}
|
||||
|
||||
// NonceError represents the error which is returned if the
|
||||
// nonce sent by the client was not accepted by the server.
|
||||
type NonceError struct {
|
||||
RemoteError
|
||||
}
|
||||
|
||||
type domainError struct {
|
||||
Domain string
|
||||
Error error
|
||||
}
|
||||
|
||||
func handleHTTPError(resp *http.Response) error {
|
||||
var errorDetail RemoteError
|
||||
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
if contentType == "application/json" || strings.HasPrefix(contentType, "application/problem+json") {
|
||||
err := json.NewDecoder(resp.Body).Decode(&errorDetail)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
detailBytes, err := ioutil.ReadAll(limitReader(resp.Body, maxBodySize))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
errorDetail.Detail = string(detailBytes)
|
||||
}
|
||||
|
||||
errorDetail.StatusCode = resp.StatusCode
|
||||
|
||||
// Check for errors we handle specifically
|
||||
if errorDetail.StatusCode == http.StatusForbidden && errorDetail.Detail == tosAgreementError {
|
||||
return TOSError{errorDetail}
|
||||
}
|
||||
|
||||
if errorDetail.StatusCode == http.StatusBadRequest && errorDetail.Type == invalidNonceError {
|
||||
return NonceError{errorDetail}
|
||||
}
|
||||
|
||||
return errorDetail
|
||||
}
|
||||
|
||||
func handleChallengeError(chlng challenge) error {
|
||||
return chlng.Error
|
||||
}
|
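Because RemoteError, TOSError and NonceError are exported, callers can switch on the concrete type of an error bubbled up by the library. A sketch using a fabricated rate-limit problem document:

package main

import (
	"fmt"

	acme "github.com/xenolf/lego/acmev2"
)

func describe(err error) string {
	switch e := err.(type) {
	case acme.TOSError:
		return "terms of service changed: " + e.Detail
	case acme.NonceError:
		return "bad nonce (the library already retries these once): " + e.Detail
	case acme.RemoteError:
		return fmt.Sprintf("ACME error %d (%s): %s", e.StatusCode, e.Type, e.Detail)
	default:
		return err.Error()
	}
}

func main() {
	err := acme.RemoteError{
		StatusCode: 429,
		Type:       "urn:ietf:params:acme:error:rateLimited",
		Detail:     "too many requests",
	}
	fmt.Println(describe(err))
}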
160
vendor/github.com/xenolf/lego/acmev2/http.go
generated
vendored
Normal file
|
@@ -0,0 +1,160 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// UserAgent (if non-empty) will be tacked onto the User-Agent string in requests.
|
||||
var UserAgent string
|
||||
|
||||
// HTTPClient is an HTTP client with a reasonable timeout value.
|
||||
var HTTPClient = http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
TLSHandshakeTimeout: 15 * time.Second,
|
||||
ResponseHeaderTimeout: 15 * time.Second,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
},
|
||||
}
|
||||
|
||||
const (
|
||||
// defaultGoUserAgent is the Go HTTP package user agent string. Too
|
||||
// bad it isn't exported. If it changes, we should update it here, too.
|
||||
defaultGoUserAgent = "Go-http-client/1.1"
|
||||
|
||||
// ourUserAgent is the User-Agent of this underlying library package.
|
||||
ourUserAgent = "xenolf-acme"
|
||||
)
|
||||
|
||||
// httpHead performs a HEAD request with a proper User-Agent string.
|
||||
// The response body (resp.Body) is already closed when this function returns.
|
||||
func httpHead(url string) (resp *http.Response, err error) {
|
||||
req, err := http.NewRequest("HEAD", url, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to head %q: %v", url, err)
|
||||
}
|
||||
|
||||
req.Header.Set("User-Agent", userAgent())
|
||||
|
||||
resp, err = HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return resp, fmt.Errorf("failed to do head %q: %v", url, err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// httpPost performs a POST request with a proper User-Agent string.
|
||||
// Callers should close resp.Body when done reading from it.
|
||||
func httpPost(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
|
||||
req, err := http.NewRequest("POST", url, body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to post %q: %v", url, err)
|
||||
}
|
||||
req.Header.Set("Content-Type", bodyType)
|
||||
req.Header.Set("User-Agent", userAgent())
|
||||
|
||||
return HTTPClient.Do(req)
|
||||
}
|
||||
|
||||
// httpGet performs a GET request with a proper User-Agent string.
|
||||
// Callers should close resp.Body when done reading from it.
|
||||
func httpGet(url string) (resp *http.Response, err error) {
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get %q: %v", url, err)
|
||||
}
|
||||
req.Header.Set("User-Agent", userAgent())
|
||||
|
||||
return HTTPClient.Do(req)
|
||||
}
|
||||
|
||||
// getJSON performs an HTTP GET request and parses the response body
|
||||
// as JSON, into the provided respBody object.
|
||||
func getJSON(uri string, respBody interface{}) (http.Header, error) {
|
||||
resp, err := httpGet(uri)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get json %q: %v", uri, err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= http.StatusBadRequest {
|
||||
return resp.Header, handleHTTPError(resp)
|
||||
}
|
||||
|
||||
return resp.Header, json.NewDecoder(resp.Body).Decode(respBody)
|
||||
}
|
||||
|
||||
// postJSON performs an HTTP POST request and parses the response body
|
||||
// as JSON, into the provided respBody object.
|
||||
func postJSON(j *jws, uri string, reqBody, respBody interface{}) (http.Header, error) {
|
||||
jsonBytes, err := json.Marshal(reqBody)
|
||||
if err != nil {
|
||||
return nil, errors.New("Failed to marshal network message")
|
||||
}
|
||||
|
||||
resp, err := j.post(uri, jsonBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to post JWS message. -> %v", err)
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= http.StatusBadRequest {
|
||||
|
||||
err := handleHTTPError(resp)
|
||||
|
||||
switch err.(type) {
|
||||
|
||||
case NonceError:
|
||||
|
||||
// Retry once if the nonce was invalidated
|
||||
|
||||
retryResp, err := j.post(uri, jsonBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to post JWS message. -> %v", err)
|
||||
}
|
||||
|
||||
defer retryResp.Body.Close()
|
||||
|
||||
if retryResp.StatusCode >= http.StatusBadRequest {
|
||||
return retryResp.Header, handleHTTPError(retryResp)
|
||||
}
|
||||
|
||||
if respBody == nil {
|
||||
return retryResp.Header, nil
|
||||
}
|
||||
|
||||
return retryResp.Header, json.NewDecoder(retryResp.Body).Decode(respBody)
|
||||
|
||||
default:
|
||||
return resp.Header, err
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if respBody == nil {
|
||||
return resp.Header, nil
|
||||
}
|
||||
|
||||
return resp.Header, json.NewDecoder(resp.Body).Decode(respBody)
|
||||
}
|
||||
|
||||
// userAgent builds and returns the User-Agent string to use in requests.
|
||||
func userAgent() string {
|
||||
ua := fmt.Sprintf("%s (%s; %s) %s %s", defaultGoUserAgent, runtime.GOOS, runtime.GOARCH, ourUserAgent, UserAgent)
|
||||
return strings.TrimSpace(ua)
|
||||
}
|
41
vendor/github.com/xenolf/lego/acmev2/http_challenge.go
generated
vendored
Normal file
|
@@ -0,0 +1,41 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
type httpChallenge struct {
|
||||
jws *jws
|
||||
validate validateFunc
|
||||
provider ChallengeProvider
|
||||
}
|
||||
|
||||
// HTTP01ChallengePath returns the URL path for the `http-01` challenge
|
||||
func HTTP01ChallengePath(token string) string {
|
||||
return "/.well-known/acme-challenge/" + token
|
||||
}
|
||||
|
||||
func (s *httpChallenge) Solve(chlng challenge, domain string) error {
|
||||
|
||||
logf("[INFO][%s] acme: Trying to solve HTTP-01", domain)
|
||||
|
||||
// Generate the Key Authorization for the challenge
|
||||
keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.provider.Present(domain, chlng.Token, keyAuth)
|
||||
if err != nil {
|
||||
return fmt.Errorf("[%s] error presenting token: %v", domain, err)
|
||||
}
|
||||
defer func() {
|
||||
err := s.provider.CleanUp(domain, chlng.Token, keyAuth)
|
||||
if err != nil {
|
||||
log.Printf("[%s] error cleaning up: %v", domain, err)
|
||||
}
|
||||
}()
|
||||
|
||||
return s.validate(s.jws, domain, chlng.URL, challenge{Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth})
|
||||
}
|
79
vendor/github.com/xenolf/lego/acmev2/http_challenge_server.go
generated
vendored
Normal file
|
@@ -0,0 +1,79 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// HTTPProviderServer implements ChallengeProvider for `http-01` challenge
|
||||
// It may be instantiated without using the NewHTTPProviderServer function if
|
||||
// you want only to use the default values.
|
||||
type HTTPProviderServer struct {
|
||||
iface string
|
||||
port string
|
||||
done chan bool
|
||||
listener net.Listener
|
||||
}
|
||||
|
||||
// NewHTTPProviderServer creates a new HTTPProviderServer on the selected interface and port.
|
||||
// Setting iface and / or port to an empty string will make the server fall back to
|
||||
// the "any" interface and port 80 respectively.
|
||||
func NewHTTPProviderServer(iface, port string) *HTTPProviderServer {
|
||||
return &HTTPProviderServer{iface: iface, port: port}
|
||||
}
|
||||
|
||||
// Present starts a web server and makes the token available at `HTTP01ChallengePath(token)` for web requests.
|
||||
func (s *HTTPProviderServer) Present(domain, token, keyAuth string) error {
|
||||
if s.port == "" {
|
||||
s.port = "80"
|
||||
}
|
||||
|
||||
var err error
|
||||
s.listener, err = net.Listen("tcp", net.JoinHostPort(s.iface, s.port))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not start HTTP server for challenge -> %v", err)
|
||||
}
|
||||
|
||||
s.done = make(chan bool)
|
||||
go s.serve(domain, token, keyAuth)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CleanUp closes the HTTP server and removes the token from `HTTP01ChallengePath(token)`
|
||||
func (s *HTTPProviderServer) CleanUp(domain, token, keyAuth string) error {
|
||||
if s.listener == nil {
|
||||
return nil
|
||||
}
|
||||
s.listener.Close()
|
||||
<-s.done
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *HTTPProviderServer) serve(domain, token, keyAuth string) {
|
||||
path := HTTP01ChallengePath(token)
|
||||
|
||||
// The handler validates the HOST header and request type.
|
||||
// If both match, it writes out the key authorization the ACME server expects for this challenge.
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
|
||||
if strings.HasPrefix(r.Host, domain) && r.Method == "GET" {
|
||||
w.Header().Add("Content-Type", "text/plain")
|
||||
w.Write([]byte(keyAuth))
|
||||
logf("[INFO][%s] Served key authentication", domain)
|
||||
} else {
|
||||
logf("[WARN] Received request for domain %s with method %s but the domain did not match any challenge. Please ensure your are passing the HOST header properly.", r.Host, r.Method)
|
||||
w.Write([]byte("TEST"))
|
||||
}
|
||||
})
|
||||
|
||||
httpServer := &http.Server{
|
||||
Handler: mux,
|
||||
}
|
||||
// Once httpServer is shut down we don't want any lingering
|
||||
// connections, so disable KeepAlives.
|
||||
httpServer.SetKeepAlivesEnabled(false)
|
||||
httpServer.Serve(s.listener)
|
||||
s.done <- true
|
||||
}
|
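A quick way to see the HTTP-01 provider in action is to present a made-up token locally and fetch it back with the Host header set. The domain, token, key authorization and port below are placeholders:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"

	acme "github.com/xenolf/lego/acmev2"
)

func main() {
	const domain, token, keyAuth = "example.com", "some-token", "some-token.key-thumbprint"

	provider := acme.NewHTTPProviderServer("", "8080") // any interface, port 8080
	if err := provider.Present(domain, token, keyAuth); err != nil {
		panic(err)
	}
	defer provider.CleanUp(domain, token, keyAuth)

	req, _ := http.NewRequest("GET", "http://localhost:8080"+acme.HTTP01ChallengePath(token), nil)
	req.Host = domain // the handler matches on the Host header
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // some-token.key-thumbprint
}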
138
vendor/github.com/xenolf/lego/acmev2/jws.go
generated
vendored
Normal file
|
@@ -0,0 +1,138 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"gopkg.in/square/go-jose.v2"
|
||||
)
|
||||
|
||||
type jws struct {
|
||||
getNonceURL string
|
||||
privKey crypto.PrivateKey
|
||||
kid string
|
||||
nonces nonceManager
|
||||
}
|
||||
|
||||
// Posts a JWS signed message to the specified URL.
|
||||
// It does NOT close the response body, so the caller must
|
||||
// do that if no error was returned.
|
||||
func (j *jws) post(url string, content []byte) (*http.Response, error) {
|
||||
signedContent, err := j.signContent(url, content)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to sign content -> %s", err.Error())
|
||||
}
|
||||
|
||||
data := bytes.NewBuffer([]byte(signedContent.FullSerialize()))
|
||||
resp, err := httpPost(url, "application/jose+json", data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to HTTP POST to %s -> %s", url, err.Error())
|
||||
}
|
||||
|
||||
nonce, nonceErr := getNonceFromResponse(resp)
|
||||
if nonceErr == nil {
|
||||
j.nonces.Push(nonce)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (j *jws) signContent(url string, content []byte) (*jose.JSONWebSignature, error) {
|
||||
|
||||
var alg jose.SignatureAlgorithm
|
||||
switch k := j.privKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
alg = jose.RS256
|
||||
case *ecdsa.PrivateKey:
|
||||
if k.Curve == elliptic.P256() {
|
||||
alg = jose.ES256
|
||||
} else if k.Curve == elliptic.P384() {
|
||||
alg = jose.ES384
|
||||
}
|
||||
}
|
||||
|
||||
jsonKey := jose.JSONWebKey{
|
||||
Key: j.privKey,
|
||||
KeyID: j.kid,
|
||||
}
|
||||
|
||||
signKey := jose.SigningKey{
|
||||
Algorithm: alg,
|
||||
Key: jsonKey,
|
||||
}
|
||||
options := jose.SignerOptions{
|
||||
NonceSource: j,
|
||||
ExtraHeaders: make(map[jose.HeaderKey]interface{}),
|
||||
}
|
||||
options.ExtraHeaders["url"] = url
|
||||
if j.kid == "" {
|
||||
options.EmbedJWK = true
|
||||
}
|
||||
|
||||
signer, err := jose.NewSigner(signKey, &options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to create jose signer -> %s", err.Error())
|
||||
}
|
||||
|
||||
signed, err := signer.Sign(content)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to sign content -> %s", err.Error())
|
||||
}
|
||||
return signed, nil
|
||||
}
|
||||
|
||||
func (j *jws) Nonce() (string, error) {
|
||||
if nonce, ok := j.nonces.Pop(); ok {
|
||||
return nonce, nil
|
||||
}
|
||||
|
||||
return getNonce(j.getNonceURL)
|
||||
}
|
||||
|
||||
type nonceManager struct {
|
||||
nonces []string
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func (n *nonceManager) Pop() (string, bool) {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
||||
if len(n.nonces) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
nonce := n.nonces[len(n.nonces)-1]
|
||||
n.nonces = n.nonces[:len(n.nonces)-1]
|
||||
return nonce, true
|
||||
}
|
||||
|
||||
func (n *nonceManager) Push(nonce string) {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
n.nonces = append(n.nonces, nonce)
|
||||
}
|
||||
|
||||
func getNonce(url string) (string, error) {
|
||||
resp, err := httpHead(url)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Failed to get nonce from HTTP HEAD -> %s", err.Error())
|
||||
}
|
||||
|
||||
return getNonceFromResponse(resp)
|
||||
}
|
||||
|
||||
func getNonceFromResponse(resp *http.Response) (string, error) {
|
||||
nonce := resp.Header.Get("Replay-Nonce")
|
||||
if nonce == "" {
|
||||
return "", fmt.Errorf("Server did not respond with a proper nonce header")
|
||||
}
|
||||
|
||||
return nonce, nil
|
||||
}
|
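The jws type is unexported, but its signContent logic maps almost one-to-one onto go-jose. A standalone sketch of signing an ACME v2 payload; the nonce, URL and payload are dummies, and a real request takes the nonce from the Replay-Nonce header and sets kid once an account exists:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

// staticNonce stands in for the nonceManager above.
type staticNonce string

func (n staticNonce) Nonce() (string, error) { return string(n), nil }

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	opts := jose.SignerOptions{
		NonceSource:  staticNonce("dummy-nonce"),
		ExtraHeaders: map[jose.HeaderKey]interface{}{"url": "https://acme.example/acme/new-order"},
	}
	opts.EmbedJWK = true // no account URL (kid) yet, so embed the public key

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: key}, &opts)
	if err != nil {
		panic(err)
	}

	signed, err := signer.Sign([]byte(`{"identifiers":[{"type":"dns","value":"example.com"}]}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(signed.FullSerialize()) // sent with Content-Type: application/jose+json
}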
103
vendor/github.com/xenolf/lego/acmev2/messages.go
generated
vendored
Normal file
|
@@ -0,0 +1,103 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// RegistrationResource represents all important information about a registration
|
||||
// of which the client needs to keep track itself.
|
||||
type RegistrationResource struct {
|
||||
Body accountMessage `json:"body,omitempty"`
|
||||
URI string `json:"uri,omitempty"`
|
||||
}
|
||||
|
||||
type directory struct {
|
||||
NewNonceURL string `json:"newNonce"`
|
||||
NewAccountURL string `json:"newAccount"`
|
||||
NewOrderURL string `json:"newOrder"`
|
||||
RevokeCertURL string `json:"revokeCert"`
|
||||
KeyChangeURL string `json:"keyChange"`
|
||||
Meta struct {
|
||||
TermsOfService string `json:"termsOfService"`
|
||||
Website string `json:"website"`
|
||||
CaaIdentities []string `json:"caaIdentities"`
|
||||
ExternalAccountRequired bool `json:"externalAccountRequired"`
|
||||
} `json:"meta"`
|
||||
}
|
||||
|
||||
type accountMessage struct {
|
||||
Status string `json:"status,omitempty"`
|
||||
Contact []string `json:"contact,omitempty"`
|
||||
TermsOfServiceAgreed bool `json:"termsOfServiceAgreed,omitempty"`
|
||||
Orders string `json:"orders,omitempty"`
|
||||
OnlyReturnExisting bool `json:"onlyReturnExisting,omitempty"`
|
||||
}
|
||||
|
||||
type orderResource struct {
|
||||
URL string `json:"url,omitempty"`
|
||||
orderMessage `json:"body,omitempty"`
|
||||
}
|
||||
|
||||
type orderMessage struct {
|
||||
Status string `json:"status,omitempty"`
|
||||
Expires string `json:"expires,omitempty"`
|
||||
Identifiers []identifier `json:"identifiers"`
|
||||
NotBefore string `json:"notBefore,omitempty"`
|
||||
NotAfter string `json:"notAfter,omitempty"`
|
||||
Authorizations []string `json:"authorizations,omitempty"`
|
||||
Finalize string `json:"finalize,omitempty"`
|
||||
Certificate string `json:"certificate,omitempty"`
|
||||
}
|
||||
|
||||
type authorization struct {
|
||||
Status string `json:"status"`
|
||||
Expires time.Time `json:"expires"`
|
||||
Identifier identifier `json:"identifier"`
|
||||
Challenges []challenge `json:"challenges"`
|
||||
}
|
||||
|
||||
type identifier struct {
|
||||
Type string `json:"type"`
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
type challenge struct {
|
||||
URL string `json:"url"`
|
||||
Type string `json:"type"`
|
||||
Status string `json:"status"`
|
||||
Token string `json:"token"`
|
||||
Validated time.Time `json:"validated"`
|
||||
KeyAuthorization string `json:"keyAuthorization"`
|
||||
Error RemoteError `json:"error"`
|
||||
}
|
||||
|
||||
type csrMessage struct {
|
||||
Csr string `json:"csr"`
|
||||
}
|
||||
|
||||
type emptyObjectMessage struct {
|
||||
}
|
||||
|
||||
type revokeCertMessage struct {
|
||||
Certificate string `json:"certificate"`
|
||||
}
|
||||
|
||||
type deactivateAuthMessage struct {
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
// CertificateResource represents a CA issued certificate.
|
||||
// PrivateKey, Certificate and IssuerCertificate are all
|
||||
// already PEM encoded and can be directly written to disk.
|
||||
// Certificate may be a certificate bundle, depending on the
|
||||
// options supplied to create it.
|
||||
type CertificateResource struct {
|
||||
Domain string `json:"domain"`
|
||||
CertURL string `json:"certUrl"`
|
||||
CertStableURL string `json:"certStableUrl"`
|
||||
AccountRef string `json:"accountRef,omitempty"`
|
||||
PrivateKey []byte `json:"-"`
|
||||
Certificate []byte `json:"-"`
|
||||
IssuerCertificate []byte `json:"-"`
|
||||
CSR []byte `json:"-"`
|
||||
}
|
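The directory message above mirrors the JSON object every ACME v2 CA serves at its directory URL. A trimmed-down sketch that fetches the Let's Encrypt staging directory and prints the endpoints the client bootstraps from:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// directoryInfo is a cut-down copy of the unexported directory type above.
type directoryInfo struct {
	NewNonceURL   string `json:"newNonce"`
	NewAccountURL string `json:"newAccount"`
	NewOrderURL   string `json:"newOrder"`
}

func main() {
	resp, err := http.Get("https://acme-staging-v02.api.letsencrypt.org/directory")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var dir directoryInfo
	if err := json.NewDecoder(resp.Body).Decode(&dir); err != nil {
		panic(err)
	}
	fmt.Println(dir.NewNonceURL)
	fmt.Println(dir.NewAccountURL)
	fmt.Println(dir.NewOrderURL)
}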
1
vendor/github.com/xenolf/lego/acmev2/pop_challenge.go
generated
vendored
Normal file
|
@@ -0,0 +1 @@
|
|||
package acme
|
28
vendor/github.com/xenolf/lego/acmev2/provider.go
generated
vendored
Normal file
|
@@ -0,0 +1,28 @@
|
|||
package acme
|
||||
|
||||
import "time"
|
||||
|
||||
// ChallengeProvider enables implementing a custom challenge
|
||||
// provider. Present presents the solution to a challenge available to
|
||||
// be solved. CleanUp will be called by the challenge if Present ends
|
||||
// in a non-error state.
|
||||
type ChallengeProvider interface {
|
||||
Present(domain, token, keyAuth string) error
|
||||
CleanUp(domain, token, keyAuth string) error
|
||||
}
|
||||
|
||||
// ChallengeProviderTimeout allows for implementing a
|
||||
// ChallengeProvider where an unusually long timeout is required when
|
||||
// waiting for an ACME challenge to be satisfied, such as when
|
||||
// checking for DNS record propagation. If an implementor of a
|
||||
// ChallengeProvider provides a Timeout method, then the return values
|
||||
// of the Timeout method will be used when appropriate by the acme
|
||||
// package. The interval value is the time between checks.
|
||||
//
|
||||
// The default values used for timeout and interval are 60 seconds and
|
||||
// 2 seconds respectively. These are used when no Timeout method is
|
||||
// defined for the ChallengeProvider.
|
||||
type ChallengeProviderTimeout interface {
|
||||
ChallengeProvider
|
||||
Timeout() (timeout, interval time.Duration)
|
||||
}
|
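As the comment above says, providers with slow record propagation can opt into longer polling by adding a Timeout method. A sketch with a hypothetical provider:

package main

import (
	"fmt"
	"time"

	acme "github.com/xenolf/lego/acmev2"
)

// slowDNSProvider is a made-up provider whose records take a while to propagate.
type slowDNSProvider struct{}

func (p *slowDNSProvider) Present(domain, token, keyAuth string) error {
	// create the _acme-challenge TXT record with your DNS API here
	return nil
}

func (p *slowDNSProvider) CleanUp(domain, token, keyAuth string) error {
	// remove the TXT record here
	return nil
}

// Timeout replaces the default 60s/2s polling used by the dns-01 solver.
func (p *slowDNSProvider) Timeout() (timeout, interval time.Duration) {
	return 10 * time.Minute, 10 * time.Second
}

func main() {
	var p acme.ChallengeProviderTimeout = &slowDNSProvider{}
	t, i := p.Timeout()
	fmt.Println("poll for", t, "every", i)
}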
29
vendor/github.com/xenolf/lego/acmev2/utils.go
generated
vendored
Normal file
|
@@ -0,0 +1,29 @@
|
|||
package acme
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// WaitFor polls the given function 'f', once every 'interval', up to 'timeout'.
|
||||
func WaitFor(timeout, interval time.Duration, f func() (bool, error)) error {
|
||||
var lastErr string
|
||||
timeup := time.After(timeout)
|
||||
for {
|
||||
select {
|
||||
case <-timeup:
|
||||
return fmt.Errorf("Time limit exceeded. Last error: %s", lastErr)
|
||||
default:
|
||||
}
|
||||
|
||||
stop, err := f()
|
||||
if stop {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
lastErr = err.Error()
|
||||
}
|
||||
|
||||
time.Sleep(interval)
|
||||
}
|
||||
}
|
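WaitFor is the generic polling helper the solvers build on, and it can just as well be reused on its own. A sketch that waits for a hostname to resolve; the host, timeout and interval are arbitrary:

package main

import (
	"fmt"
	"net"
	"time"

	acme "github.com/xenolf/lego/acmev2"
)

func main() {
	err := acme.WaitFor(30*time.Second, 2*time.Second, func() (bool, error) {
		_, err := net.LookupHost("example.org")
		if err != nil {
			return false, err // kept as the "last error" if the timeout is hit
		}
		return true, nil
	})
	if err != nil {
		fmt.Println("gave up:", err)
	}
}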
202
vendor/gopkg.in/square/go-jose.v2/LICENSE
generated
vendored
Normal file
|
@@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
591
vendor/gopkg.in/square/go-jose.v2/asymmetric.go
generated
vendored
Normal file
591
vendor/gopkg.in/square/go-jose.v2/asymmetric.go
generated
vendored
Normal file
|
@ -0,0 +1,591 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/aes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"gopkg.in/square/go-jose.v2/cipher"
|
||||
"gopkg.in/square/go-jose.v2/json"
|
||||
)
|
||||
|
||||
// A generic RSA-based encrypter/verifier
|
||||
type rsaEncrypterVerifier struct {
|
||||
publicKey *rsa.PublicKey
|
||||
}
|
||||
|
||||
// A generic RSA-based decrypter/signer
|
||||
type rsaDecrypterSigner struct {
|
||||
privateKey *rsa.PrivateKey
|
||||
}
|
||||
|
||||
// A generic EC-based encrypter/verifier
|
||||
type ecEncrypterVerifier struct {
|
||||
publicKey *ecdsa.PublicKey
|
||||
}
|
||||
|
||||
type edEncrypterVerifier struct {
|
||||
publicKey ed25519.PublicKey
|
||||
}
|
||||
|
||||
// A key generator for ECDH-ES
|
||||
type ecKeyGenerator struct {
|
||||
size int
|
||||
algID string
|
||||
publicKey *ecdsa.PublicKey
|
||||
}
|
||||
|
||||
// A generic EC-based decrypter/signer
|
||||
type ecDecrypterSigner struct {
|
||||
privateKey *ecdsa.PrivateKey
|
||||
}
|
||||
|
||||
type edDecrypterSigner struct {
|
||||
privateKey ed25519.PrivateKey
|
||||
}
|
||||
|
||||
// newRSARecipient creates recipientKeyInfo based on the given key.
|
||||
func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch keyAlg {
|
||||
case RSA1_5, RSA_OAEP, RSA_OAEP_256:
|
||||
default:
|
||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if publicKey == nil {
|
||||
return recipientKeyInfo{}, errors.New("invalid public key")
|
||||
}
|
||||
|
||||
return recipientKeyInfo{
|
||||
keyAlg: keyAlg,
|
||||
keyEncrypter: &rsaEncrypterVerifier{
|
||||
publicKey: publicKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newRSASigner creates a recipientSigInfo based on the given key.
|
||||
func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch sigAlg {
|
||||
case RS256, RS384, RS512, PS256, PS384, PS512:
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if privateKey == nil {
|
||||
return recipientSigInfo{}, errors.New("invalid private key")
|
||||
}
|
||||
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
publicKey: &JSONWebKey{
|
||||
Key: &privateKey.PublicKey,
|
||||
},
|
||||
signer: &rsaDecrypterSigner{
|
||||
privateKey: privateKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
|
||||
if sigAlg != EdDSA {
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if privateKey == nil {
|
||||
return recipientSigInfo{}, errors.New("invalid private key")
|
||||
}
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
publicKey: &JSONWebKey{
|
||||
Key: privateKey.Public(),
|
||||
},
|
||||
signer: &edDecrypterSigner{
|
||||
privateKey: privateKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newECDHRecipient creates recipientKeyInfo based on the given key.
|
||||
func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch keyAlg {
|
||||
case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
|
||||
default:
|
||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
|
||||
return recipientKeyInfo{}, errors.New("invalid public key")
|
||||
}
|
||||
|
||||
return recipientKeyInfo{
|
||||
keyAlg: keyAlg,
|
||||
keyEncrypter: &ecEncrypterVerifier{
|
||||
publicKey: publicKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newECDSASigner creates a recipientSigInfo based on the given key.
|
||||
func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch sigAlg {
|
||||
case ES256, ES384, ES512:
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if privateKey == nil {
|
||||
return recipientSigInfo{}, errors.New("invalid private key")
|
||||
}
|
||||
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
publicKey: &JSONWebKey{
|
||||
Key: &privateKey.PublicKey,
|
||||
},
|
||||
signer: &ecDecrypterSigner{
|
||||
privateKey: privateKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Encrypt the given payload and update the object.
|
||||
func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
||||
encryptedKey, err := ctx.encrypt(cek, alg)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
encryptedKey: encryptedKey,
|
||||
header: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Encrypt the given payload. Based on the key encryption algorithm,
|
||||
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
|
||||
func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
|
||||
switch alg {
|
||||
case RSA1_5:
|
||||
return rsa.EncryptPKCS1v15(randReader, ctx.publicKey, cek)
|
||||
case RSA_OAEP:
|
||||
return rsa.EncryptOAEP(sha1.New(), randReader, ctx.publicKey, cek, []byte{})
|
||||
case RSA_OAEP_256:
|
||||
return rsa.EncryptOAEP(sha256.New(), randReader, ctx.publicKey, cek, []byte{})
|
||||
}
|
||||
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Decrypt the given payload and return the content encryption key.
|
||||
func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
||||
return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
|
||||
}
|
||||
|
||||
// Decrypt the given payload. Based on the key encryption algorithm,
|
||||
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
|
||||
func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
|
||||
// Note: The random reader on decrypt operations is only used for blinding,
|
||||
// so stubbing is meanlingless (hence the direct use of rand.Reader).
|
||||
switch alg {
|
||||
case RSA1_5:
|
||||
defer func() {
|
||||
// DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
|
||||
// because of an index out of bounds error, which we want to ignore.
|
||||
// This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
|
||||
// only exists for preventing crashes with unpatched versions.
|
||||
// See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
|
||||
// See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
|
||||
_ = recover()
|
||||
}()
|
||||
|
||||
// Perform some input validation.
|
||||
keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
|
||||
if keyBytes != len(jek) {
|
||||
// Input size is incorrect, the encrypted payload should always match
|
||||
// the size of the public modulus (e.g. using a 2048 bit key will
|
||||
// produce 256 bytes of output). Reject this since it's invalid input.
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
cek, _, err := generator.genKey()
|
||||
if err != nil {
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
// When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
|
||||
// prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
|
||||
// the Million Message Attack on Cryptographic Message Syntax". We are
|
||||
// therefore deliberately ignoring errors here.
|
||||
_ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
|
||||
|
||||
return cek, nil
|
||||
case RSA_OAEP:
|
||||
// Use rand.Reader for RSA blinding
|
||||
return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
|
||||
case RSA_OAEP_256:
|
||||
// Use rand.Reader for RSA blinding
|
||||
return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
|
||||
}
|
||||
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Sign the given payload
|
||||
func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case RS256, PS256:
|
||||
hash = crypto.SHA256
|
||||
case RS384, PS384:
|
||||
hash = crypto.SHA384
|
||||
case RS512, PS512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return Signature{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
var out []byte
|
||||
var err error
|
||||
|
||||
switch alg {
|
||||
case RS256, RS384, RS512:
|
||||
out, err = rsa.SignPKCS1v15(randReader, ctx.privateKey, hash, hashed)
|
||||
case PS256, PS384, PS512:
|
||||
out, err = rsa.SignPSS(randReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthAuto,
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return Signature{}, err
|
||||
}
|
||||
|
||||
return Signature{
|
||||
Signature: out,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Verify the given payload
|
||||
func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case RS256, PS256:
|
||||
hash = crypto.SHA256
|
||||
case RS384, PS384:
|
||||
hash = crypto.SHA384
|
||||
case RS512, PS512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
switch alg {
|
||||
case RS256, RS384, RS512:
|
||||
return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
|
||||
case PS256, PS384, PS512:
|
||||
return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
|
||||
}
|
||||
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Encrypt the given payload and update the object.
|
||||
func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
||||
switch alg {
|
||||
case ECDH_ES:
|
||||
// ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key.
|
||||
return recipientInfo{
|
||||
header: &rawHeader{},
|
||||
}, nil
|
||||
case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
|
||||
default:
|
||||
return recipientInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
generator := ecKeyGenerator{
|
||||
algID: string(alg),
|
||||
publicKey: ctx.publicKey,
|
||||
}
|
||||
|
||||
switch alg {
|
||||
case ECDH_ES_A128KW:
|
||||
generator.size = 16
|
||||
case ECDH_ES_A192KW:
|
||||
generator.size = 24
|
||||
case ECDH_ES_A256KW:
|
||||
generator.size = 32
|
||||
}
|
||||
|
||||
kek, header, err := generator.genKey()
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
block, err := aes.NewCipher(kek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
jek, err := josecipher.KeyWrap(block, cek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
encryptedKey: jek,
|
||||
header: &header,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Get key size for EC key generator
|
||||
func (ctx ecKeyGenerator) keySize() int {
|
||||
return ctx.size
|
||||
}
|
||||
|
||||
// Get a content encryption key for ECDH-ES
|
||||
func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
||||
priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, randReader)
|
||||
if err != nil {
|
||||
return nil, rawHeader{}, err
|
||||
}
|
||||
|
||||
out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
|
||||
|
||||
b, err := json.Marshal(&JSONWebKey{
|
||||
Key: &priv.PublicKey,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
headers := rawHeader{
|
||||
headerEPK: makeRawMessage(b),
|
||||
}
|
||||
|
||||
return out, headers, nil
|
||||
}
|
||||
|
||||
// Decrypt the given payload and return the content encryption key.
|
||||
func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
||||
epk, err := headers.getEPK()
|
||||
if err != nil {
|
||||
return nil, errors.New("square/go-jose: invalid epk header")
|
||||
}
|
||||
if epk == nil {
|
||||
return nil, errors.New("square/go-jose: missing epk header")
|
||||
}
|
||||
|
||||
publicKey, ok := epk.Key.(*ecdsa.PublicKey)
|
||||
if publicKey == nil || !ok {
|
||||
return nil, errors.New("square/go-jose: invalid epk header")
|
||||
}
|
||||
|
||||
if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
|
||||
return nil, errors.New("square/go-jose: invalid public key in epk header")
|
||||
}
|
||||
|
||||
apuData, err := headers.getAPU()
|
||||
if err != nil {
|
||||
return nil, errors.New("square/go-jose: invalid apu header")
|
||||
}
|
||||
apvData, err := headers.getAPV()
|
||||
if err != nil {
|
||||
return nil, errors.New("square/go-jose: invalid apv header")
|
||||
}
|
||||
|
||||
deriveKey := func(algID string, size int) []byte {
|
||||
return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size)
|
||||
}
|
||||
|
||||
var keySize int
|
||||
|
||||
algorithm := headers.getAlgorithm()
|
||||
switch algorithm {
|
||||
case ECDH_ES:
|
||||
// ECDH-ES uses direct key agreement, no key unwrapping necessary.
|
||||
return deriveKey(string(headers.getEncryption()), generator.keySize()), nil
|
||||
case ECDH_ES_A128KW:
|
||||
keySize = 16
|
||||
case ECDH_ES_A192KW:
|
||||
keySize = 24
|
||||
case ECDH_ES_A256KW:
|
||||
keySize = 32
|
||||
default:
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
key := deriveKey(string(algorithm), keySize)
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return josecipher.KeyUnwrap(block, recipient.encryptedKey)
|
||||
}
|
||||
func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
if alg != EdDSA {
|
||||
return Signature{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
sig, err := ctx.privateKey.Sign(randReader, payload, crypto.Hash(0))
|
||||
if err != nil {
|
||||
return Signature{}, err
|
||||
}
|
||||
|
||||
return Signature{
|
||||
Signature: sig,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
||||
if alg != EdDSA {
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
ok := ed25519.Verify(ctx.publicKey, payload, signature)
|
||||
if !ok {
|
||||
return errors.New("square/go-jose: ed25519 signature failed to verify")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sign the given payload
|
||||
func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
var expectedBitSize int
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case ES256:
|
||||
expectedBitSize = 256
|
||||
hash = crypto.SHA256
|
||||
case ES384:
|
||||
expectedBitSize = 384
|
||||
hash = crypto.SHA384
|
||||
case ES512:
|
||||
expectedBitSize = 521
|
||||
hash = crypto.SHA512
|
||||
}
|
||||
|
||||
curveBits := ctx.privateKey.Curve.Params().BitSize
|
||||
if expectedBitSize != curveBits {
|
||||
return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
r, s, err := ecdsa.Sign(randReader, ctx.privateKey, hashed)
|
||||
if err != nil {
|
||||
return Signature{}, err
|
||||
}
|
||||
|
||||
keyBytes := curveBits / 8
|
||||
if curveBits%8 > 0 {
|
||||
keyBytes++
|
||||
}
|
||||
|
||||
// We serialize the outpus (r and s) into big-endian byte arrays and pad
|
||||
// them with zeros on the left to make sure the sizes work out. Both arrays
|
||||
// must be keyBytes long, and the output must be 2*keyBytes long.
|
||||
rBytes := r.Bytes()
|
||||
rBytesPadded := make([]byte, keyBytes)
|
||||
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
|
||||
|
||||
sBytes := s.Bytes()
|
||||
sBytesPadded := make([]byte, keyBytes)
|
||||
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
|
||||
|
||||
out := append(rBytesPadded, sBytesPadded...)
|
||||
|
||||
return Signature{
|
||||
Signature: out,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Verify the given payload
|
||||
func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
||||
var keySize int
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case ES256:
|
||||
keySize = 32
|
||||
hash = crypto.SHA256
|
||||
case ES384:
|
||||
keySize = 48
|
||||
hash = crypto.SHA384
|
||||
case ES512:
|
||||
keySize = 66
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if len(signature) != 2*keySize {
|
||||
return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
r := big.NewInt(0).SetBytes(signature[:keySize])
|
||||
s := big.NewInt(0).SetBytes(signature[keySize:])
|
||||
|
||||
match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
|
||||
if !match {
|
||||
return errors.New("square/go-jose: ecdsa signature failed to verify")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
196
vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go
generated
vendored
Normal file
196
vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go
generated
vendored
Normal file
|
@ -0,0 +1,196 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package josecipher
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/cipher"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"crypto/subtle"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"hash"
|
||||
)
|
||||
|
||||
const (
|
||||
nonceBytes = 16
|
||||
)
|
||||
|
||||
// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
|
||||
func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
|
||||
keySize := len(key) / 2
|
||||
integrityKey := key[:keySize]
|
||||
encryptionKey := key[keySize:]
|
||||
|
||||
blockCipher, err := newBlockCipher(encryptionKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var hash func() hash.Hash
|
||||
switch keySize {
|
||||
case 16:
|
||||
hash = sha256.New
|
||||
case 24:
|
||||
hash = sha512.New384
|
||||
case 32:
|
||||
hash = sha512.New
|
||||
}
|
||||
|
||||
return &cbcAEAD{
|
||||
hash: hash,
|
||||
blockCipher: blockCipher,
|
||||
authtagBytes: keySize,
|
||||
integrityKey: integrityKey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// An AEAD based on CBC+HMAC
|
||||
type cbcAEAD struct {
|
||||
hash func() hash.Hash
|
||||
authtagBytes int
|
||||
integrityKey []byte
|
||||
blockCipher cipher.Block
|
||||
}
|
||||
|
||||
func (ctx *cbcAEAD) NonceSize() int {
|
||||
return nonceBytes
|
||||
}
|
||||
|
||||
func (ctx *cbcAEAD) Overhead() int {
|
||||
// Maximum overhead is block size (for padding) plus auth tag length, where
|
||||
// the length of the auth tag is equivalent to the key size.
|
||||
return ctx.blockCipher.BlockSize() + ctx.authtagBytes
|
||||
}
|
||||
|
||||
// Seal encrypts and authenticates the plaintext.
|
||||
func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
|
||||
// Output buffer -- must take care not to mangle plaintext input.
|
||||
ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
|
||||
copy(ciphertext, plaintext)
|
||||
ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
|
||||
|
||||
cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
|
||||
|
||||
cbc.CryptBlocks(ciphertext, ciphertext)
|
||||
authtag := ctx.computeAuthTag(data, nonce, ciphertext)
|
||||
|
||||
ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
|
||||
copy(out, ciphertext)
|
||||
copy(out[len(ciphertext):], authtag)
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// Open decrypts and authenticates the ciphertext.
|
||||
func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
|
||||
if len(ciphertext) < ctx.authtagBytes {
|
||||
return nil, errors.New("square/go-jose: invalid ciphertext (too short)")
|
||||
}
|
||||
|
||||
offset := len(ciphertext) - ctx.authtagBytes
|
||||
expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
|
||||
match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
|
||||
if match != 1 {
|
||||
return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)")
|
||||
}
|
||||
|
||||
cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
|
||||
|
||||
// Make copy of ciphertext buffer, don't want to modify in place
|
||||
buffer := append([]byte{}, []byte(ciphertext[:offset])...)
|
||||
|
||||
if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
|
||||
return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)")
|
||||
}
|
||||
|
||||
cbc.CryptBlocks(buffer, buffer)
|
||||
|
||||
// Remove padding
|
||||
plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
|
||||
copy(out, plaintext)
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Compute an authentication tag
|
||||
func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
|
||||
buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
|
||||
n := 0
|
||||
n += copy(buffer, aad)
|
||||
n += copy(buffer[n:], nonce)
|
||||
n += copy(buffer[n:], ciphertext)
|
||||
binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)
|
||||
|
||||
// According to documentation, Write() on hash.Hash never fails.
|
||||
hmac := hmac.New(ctx.hash, ctx.integrityKey)
|
||||
_, _ = hmac.Write(buffer)
|
||||
|
||||
return hmac.Sum(nil)[:ctx.authtagBytes]
|
||||
}
|
||||
|
||||
// resize ensures the the given slice has a capacity of at least n bytes.
|
||||
// If the capacity of the slice is less than n, a new slice is allocated
|
||||
// and the existing data will be copied.
|
||||
func resize(in []byte, n uint64) (head, tail []byte) {
|
||||
if uint64(cap(in)) >= n {
|
||||
head = in[:n]
|
||||
} else {
|
||||
head = make([]byte, n)
|
||||
copy(head, in)
|
||||
}
|
||||
|
||||
tail = head[len(in):]
|
||||
return
|
||||
}
|
||||
|
||||
// Apply padding
|
||||
func padBuffer(buffer []byte, blockSize int) []byte {
|
||||
missing := blockSize - (len(buffer) % blockSize)
|
||||
ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
|
||||
padding := bytes.Repeat([]byte{byte(missing)}, missing)
|
||||
copy(out, padding)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Remove padding
|
||||
func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
|
||||
if len(buffer)%blockSize != 0 {
|
||||
return nil, errors.New("square/go-jose: invalid padding")
|
||||
}
|
||||
|
||||
last := buffer[len(buffer)-1]
|
||||
count := int(last)
|
||||
|
||||
if count == 0 || count > blockSize || count > len(buffer) {
|
||||
return nil, errors.New("square/go-jose: invalid padding")
|
||||
}
|
||||
|
||||
padding := bytes.Repeat([]byte{last}, count)
|
||||
if !bytes.HasSuffix(buffer, padding) {
|
||||
return nil, errors.New("square/go-jose: invalid padding")
|
||||
}
|
||||
|
||||
return buffer[:len(buffer)-count], nil
|
||||
}
|
75
vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go
generated
vendored
Normal file
75
vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package josecipher
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
type concatKDF struct {
|
||||
z, info []byte
|
||||
i uint32
|
||||
cache []byte
|
||||
hasher hash.Hash
|
||||
}
|
||||
|
||||
// NewConcatKDF builds a KDF reader based on the given inputs.
|
||||
func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
|
||||
buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
|
||||
n := 0
|
||||
n += copy(buffer, algID)
|
||||
n += copy(buffer[n:], ptyUInfo)
|
||||
n += copy(buffer[n:], ptyVInfo)
|
||||
n += copy(buffer[n:], supPubInfo)
|
||||
copy(buffer[n:], supPrivInfo)
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
return &concatKDF{
|
||||
z: z,
|
||||
info: buffer,
|
||||
hasher: hasher,
|
||||
cache: []byte{},
|
||||
i: 1,
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *concatKDF) Read(out []byte) (int, error) {
|
||||
copied := copy(out, ctx.cache)
|
||||
ctx.cache = ctx.cache[copied:]
|
||||
|
||||
for copied < len(out) {
|
||||
ctx.hasher.Reset()
|
||||
|
||||
// Write on a hash.Hash never fails
|
||||
_ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
|
||||
_, _ = ctx.hasher.Write(ctx.z)
|
||||
_, _ = ctx.hasher.Write(ctx.info)
|
||||
|
||||
hash := ctx.hasher.Sum(nil)
|
||||
chunkCopied := copy(out[copied:], hash)
|
||||
copied += chunkCopied
|
||||
ctx.cache = hash[chunkCopied:]
|
||||
|
||||
ctx.i++
|
||||
}
|
||||
|
||||
return copied, nil
|
||||
}
|
62
vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go
generated
vendored
Normal file
62
vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go
generated
vendored
Normal file
|
@ -0,0 +1,62 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package josecipher
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
|
||||
// It is an error to call this function with a private/public key that are not on the same
|
||||
// curve. Callers must ensure that the keys are valid before calling this function. Output
|
||||
// size may be at most 1<<16 bytes (64 KiB).
|
||||
func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
|
||||
if size > 1<<16 {
|
||||
panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
|
||||
}
|
||||
|
||||
// algId, partyUInfo, partyVInfo inputs must be prefixed with the length
|
||||
algID := lengthPrefixed([]byte(alg))
|
||||
ptyUInfo := lengthPrefixed(apuData)
|
||||
ptyVInfo := lengthPrefixed(apvData)
|
||||
|
||||
// suppPubInfo is the encoded length of the output size in bits
|
||||
supPubInfo := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
|
||||
|
||||
if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
|
||||
panic("public key not on same curve as private key")
|
||||
}
|
||||
|
||||
z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
|
||||
reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
|
||||
|
||||
key := make([]byte, size)
|
||||
|
||||
// Read on the KDF will never fail
|
||||
_, _ = reader.Read(key)
|
||||
return key
|
||||
}
|
||||
|
||||
func lengthPrefixed(data []byte) []byte {
|
||||
out := make([]byte, len(data)+4)
|
||||
binary.BigEndian.PutUint32(out, uint32(len(data)))
|
||||
copy(out[4:], data)
|
||||
return out
|
||||
}
|
109
vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go
generated
vendored
Normal file
109
vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go
generated
vendored
Normal file
|
@ -0,0 +1,109 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package josecipher
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
"crypto/subtle"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
|
||||
|
||||
// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
|
||||
func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
|
||||
if len(cek)%8 != 0 {
|
||||
return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
|
||||
}
|
||||
|
||||
n := len(cek) / 8
|
||||
r := make([][]byte, n)
|
||||
|
||||
for i := range r {
|
||||
r[i] = make([]byte, 8)
|
||||
copy(r[i], cek[i*8:])
|
||||
}
|
||||
|
||||
buffer := make([]byte, 16)
|
||||
tBytes := make([]byte, 8)
|
||||
copy(buffer, defaultIV)
|
||||
|
||||
for t := 0; t < 6*n; t++ {
|
||||
copy(buffer[8:], r[t%n])
|
||||
|
||||
block.Encrypt(buffer, buffer)
|
||||
|
||||
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
|
||||
|
||||
for i := 0; i < 8; i++ {
|
||||
buffer[i] = buffer[i] ^ tBytes[i]
|
||||
}
|
||||
copy(r[t%n], buffer[8:])
|
||||
}
|
||||
|
||||
out := make([]byte, (n+1)*8)
|
||||
copy(out, buffer[:8])
|
||||
for i := range r {
|
||||
copy(out[(i+1)*8:], r[i])
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
|
||||
func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
|
||||
if len(ciphertext)%8 != 0 {
|
||||
return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
|
||||
}
|
||||
|
||||
n := (len(ciphertext) / 8) - 1
|
||||
r := make([][]byte, n)
|
||||
|
||||
for i := range r {
|
||||
r[i] = make([]byte, 8)
|
||||
copy(r[i], ciphertext[(i+1)*8:])
|
||||
}
|
||||
|
||||
buffer := make([]byte, 16)
|
||||
tBytes := make([]byte, 8)
|
||||
copy(buffer[:8], ciphertext[:8])
|
||||
|
||||
for t := 6*n - 1; t >= 0; t-- {
|
||||
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
|
||||
|
||||
for i := 0; i < 8; i++ {
|
||||
buffer[i] = buffer[i] ^ tBytes[i]
|
||||
}
|
||||
copy(buffer[8:], r[t%n])
|
||||
|
||||
block.Decrypt(buffer, buffer)
|
||||
|
||||
copy(r[t%n], buffer[8:])
|
||||
}
|
||||
|
||||
if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
|
||||
return nil, errors.New("square/go-jose: failed to unwrap key")
|
||||
}
|
||||
|
||||
out := make([]byte, n*8)
|
||||
for i := range r {
|
||||
copy(out[i*8:], r[i])
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
510
vendor/gopkg.in/square/go-jose.v2/crypter.go
generated
vendored
Normal file
510
vendor/gopkg.in/square/go-jose.v2/crypter.go
generated
vendored
Normal file
|
@ -0,0 +1,510 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"gopkg.in/square/go-jose.v2/json"
|
||||
)
|
||||
|
||||
// Encrypter represents an encrypter which produces an encrypted JWE object.
|
||||
type Encrypter interface {
|
||||
Encrypt(plaintext []byte) (*JSONWebEncryption, error)
|
||||
EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error)
|
||||
Options() EncrypterOptions
|
||||
}
|
||||
|
||||
// A generic content cipher
|
||||
type contentCipher interface {
|
||||
keySize() int
|
||||
encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
|
||||
decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
|
||||
}
|
||||
|
||||
// A key generator (for generating/getting a CEK)
|
||||
type keyGenerator interface {
|
||||
keySize() int
|
||||
genKey() ([]byte, rawHeader, error)
|
||||
}
|
||||
|
||||
// A generic key encrypter
|
||||
type keyEncrypter interface {
|
||||
encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
|
||||
}
|
||||
|
||||
// A generic key decrypter
|
||||
type keyDecrypter interface {
|
||||
decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
|
||||
}
|
||||
|
||||
// A generic encrypter based on the given key encrypter and content cipher.
|
||||
type genericEncrypter struct {
|
||||
contentAlg ContentEncryption
|
||||
compressionAlg CompressionAlgorithm
|
||||
cipher contentCipher
|
||||
recipients []recipientKeyInfo
|
||||
keyGenerator keyGenerator
|
||||
extraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
type recipientKeyInfo struct {
|
||||
keyID string
|
||||
keyAlg KeyAlgorithm
|
||||
keyEncrypter keyEncrypter
|
||||
}
|
||||
|
||||
// EncrypterOptions represents options that can be set on new encrypters.
|
||||
type EncrypterOptions struct {
|
||||
Compression CompressionAlgorithm
|
||||
|
||||
// Optional map of additional keys to be inserted into the protected header
|
||||
// of a JWS object. Some specifications which make use of JWS like to insert
|
||||
// additional values here. All values must be JSON-serializable.
|
||||
ExtraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
|
||||
// if necessary. It returns itself and so can be used in a fluent style.
|
||||
func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
|
||||
if eo.ExtraHeaders == nil {
|
||||
eo.ExtraHeaders = map[HeaderKey]interface{}{}
|
||||
}
|
||||
eo.ExtraHeaders[k] = v
|
||||
return eo
|
||||
}
|
||||
|
||||
// WithContentType adds a content type ("cty") header and returns the updated
|
||||
// EncrypterOptions.
|
||||
func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions {
|
||||
return eo.WithHeader(HeaderContentType, contentType)
|
||||
}
|
||||
|
||||
// WithType adds a type ("typ") header and returns the updated EncrypterOptions.
|
||||
func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
|
||||
return eo.WithHeader(HeaderType, typ)
|
||||
}
|
||||
|
||||
// Recipient represents an algorithm/key to encrypt messages to.
|
||||
type Recipient struct {
|
||||
Algorithm KeyAlgorithm
|
||||
Key interface{}
|
||||
KeyID string
|
||||
}
|
||||
|
||||
// NewEncrypter creates an appropriate encrypter based on the key type
|
||||
func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) {
|
||||
encrypter := &genericEncrypter{
|
||||
contentAlg: enc,
|
||||
recipients: []recipientKeyInfo{},
|
||||
cipher: getContentCipher(enc),
|
||||
}
|
||||
if opts != nil {
|
||||
encrypter.compressionAlg = opts.Compression
|
||||
encrypter.extraHeaders = opts.ExtraHeaders
|
||||
}
|
||||
|
||||
if encrypter.cipher == nil {
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
var keyID string
|
||||
var rawKey interface{}
|
||||
switch encryptionKey := rcpt.Key.(type) {
|
||||
case JSONWebKey:
|
||||
keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
|
||||
case *JSONWebKey:
|
||||
keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
|
||||
default:
|
||||
rawKey = encryptionKey
|
||||
}
|
||||
|
||||
switch rcpt.Algorithm {
|
||||
case DIRECT:
|
||||
// Direct encryption mode must be treated differently
|
||||
if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
encrypter.keyGenerator = staticKeyGenerator{
|
||||
key: rawKey.([]byte),
|
||||
}
|
||||
recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte))
|
||||
recipientInfo.keyID = keyID
|
||||
if rcpt.KeyID != "" {
|
||||
recipientInfo.keyID = rcpt.KeyID
|
||||
}
|
||||
encrypter.recipients = []recipientKeyInfo{recipientInfo}
|
||||
return encrypter, nil
|
||||
case ECDH_ES:
|
||||
// ECDH-ES (w/o key wrapping) is similar to DIRECT mode
|
||||
typeOf := reflect.TypeOf(rawKey)
|
||||
if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
encrypter.keyGenerator = ecKeyGenerator{
|
||||
size: encrypter.cipher.keySize(),
|
||||
algID: string(enc),
|
||||
publicKey: rawKey.(*ecdsa.PublicKey),
|
||||
}
|
||||
recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey))
|
||||
recipientInfo.keyID = keyID
|
||||
if rcpt.KeyID != "" {
|
||||
recipientInfo.keyID = rcpt.KeyID
|
||||
}
|
||||
encrypter.recipients = []recipientKeyInfo{recipientInfo}
|
||||
return encrypter, nil
|
||||
default:
|
||||
// Can just add a standard recipient
|
||||
encrypter.keyGenerator = randomKeyGenerator{
|
||||
size: encrypter.cipher.keySize(),
|
||||
}
|
||||
err := encrypter.addRecipient(rcpt)
|
||||
return encrypter, err
|
||||
}
|
||||
}
|
||||
|
||||
// NewMultiEncrypter creates a multi-encrypter based on the given parameters
|
||||
func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) {
|
||||
cipher := getContentCipher(enc)
|
||||
|
||||
if cipher == nil {
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
if rcpts == nil || len(rcpts) == 0 {
|
||||
return nil, fmt.Errorf("square/go-jose: recipients is nil or empty")
|
||||
}
|
||||
|
||||
encrypter := &genericEncrypter{
|
||||
contentAlg: enc,
|
||||
recipients: []recipientKeyInfo{},
|
||||
cipher: cipher,
|
||||
keyGenerator: randomKeyGenerator{
|
||||
size: cipher.keySize(),
|
||||
},
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
encrypter.compressionAlg = opts.Compression
|
||||
}
|
||||
|
||||
for _, recipient := range rcpts {
|
||||
err := encrypter.addRecipient(recipient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return encrypter, nil
|
||||
}
|
||||
|
||||
func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) {
|
||||
var recipientInfo recipientKeyInfo
|
||||
|
||||
switch recipient.Algorithm {
|
||||
case DIRECT, ECDH_ES:
|
||||
return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm)
|
||||
}
|
||||
|
||||
recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key)
|
||||
if recipient.KeyID != "" {
|
||||
recipientInfo.keyID = recipient.KeyID
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
ctx.recipients = append(ctx.recipients, recipientInfo)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
|
||||
switch encryptionKey := encryptionKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
return newRSARecipient(alg, encryptionKey)
|
||||
case *ecdsa.PublicKey:
|
||||
return newECDHRecipient(alg, encryptionKey)
|
||||
case []byte:
|
||||
return newSymmetricRecipient(alg, encryptionKey)
|
||||
case *JSONWebKey:
|
||||
recipient, err := makeJWERecipient(alg, encryptionKey.Key)
|
||||
recipient.keyID = encryptionKey.KeyID
|
||||
return recipient, err
|
||||
default:
|
||||
return recipientKeyInfo{}, ErrUnsupportedKeyType
|
||||
}
|
||||
}
|
||||
|
||||
// newDecrypter creates an appropriate decrypter based on the key type
|
||||
func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
|
||||
switch decryptionKey := decryptionKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return &rsaDecrypterSigner{
|
||||
privateKey: decryptionKey,
|
||||
}, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return &ecDecrypterSigner{
|
||||
privateKey: decryptionKey,
|
||||
}, nil
|
||||
case []byte:
|
||||
return &symmetricKeyCipher{
|
||||
key: decryptionKey,
|
||||
}, nil
|
||||
case JSONWebKey:
|
||||
return newDecrypter(decryptionKey.Key)
|
||||
case *JSONWebKey:
|
||||
return newDecrypter(decryptionKey.Key)
|
||||
default:
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
}
|
||||
|
||||
// Implementation of encrypt method producing a JWE object.
|
||||
func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) {
|
||||
return ctx.EncryptWithAuthData(plaintext, nil)
|
||||
}
|
||||
|
||||
// Implementation of encrypt method producing a JWE object.
|
||||
func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) {
|
||||
obj := &JSONWebEncryption{}
|
||||
obj.aad = aad
|
||||
|
||||
obj.protected = &rawHeader{}
|
||||
err := obj.protected.set(headerEncryption, ctx.contentAlg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj.recipients = make([]recipientInfo, len(ctx.recipients))
|
||||
|
||||
if len(ctx.recipients) == 0 {
|
||||
return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to")
|
||||
}
|
||||
|
||||
cek, headers, err := ctx.keyGenerator.genKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj.protected.merge(&headers)
|
||||
|
||||
for i, info := range ctx.recipients {
|
||||
recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = recipient.header.set(headerAlgorithm, info.keyAlg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if info.keyID != "" {
|
||||
err = recipient.header.set(headerKeyID, info.keyID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
obj.recipients[i] = recipient
|
||||
}
|
||||
|
||||
if len(ctx.recipients) == 1 {
|
||||
// Move per-recipient headers into main protected header if there's
|
||||
// only a single recipient.
|
||||
obj.protected.merge(obj.recipients[0].header)
|
||||
obj.recipients[0].header = nil
|
||||
}
|
||||
|
||||
if ctx.compressionAlg != NONE {
|
||||
plaintext, err = compress(ctx.compressionAlg, plaintext)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = obj.protected.set(headerCompression, ctx.compressionAlg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range ctx.extraHeaders {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
(*obj.protected)[k] = makeRawMessage(b)
|
||||
}
|
||||
|
||||
authData := obj.computeAuthData()
|
||||
parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj.iv = parts.iv
|
||||
obj.ciphertext = parts.ciphertext
|
||||
obj.tag = parts.tag
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func (ctx *genericEncrypter) Options() EncrypterOptions {
|
||||
return EncrypterOptions{
|
||||
Compression: ctx.compressionAlg,
|
||||
ExtraHeaders: ctx.extraHeaders,
|
||||
}
|
||||
}
|
||||
|
||||
// Decrypt and validate the object and return the plaintext. Note that this
|
||||
// function does not support multi-recipient, if you desire multi-recipient
|
||||
// decryption use DecryptMulti instead.
|
||||
func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
|
||||
headers := obj.mergedHeaders(nil)
|
||||
|
||||
if len(obj.recipients) > 1 {
|
||||
return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one")
|
||||
}
|
||||
|
||||
critical, err := headers.getCritical()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid crit header")
|
||||
}
|
||||
|
||||
if len(critical) > 0 {
|
||||
return nil, fmt.Errorf("square/go-jose: unsupported crit header")
|
||||
}
|
||||
|
||||
decrypter, err := newDecrypter(decryptionKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cipher := getContentCipher(headers.getEncryption())
|
||||
if cipher == nil {
|
||||
return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.getEncryption()))
|
||||
}
|
||||
|
||||
generator := randomKeyGenerator{
|
||||
size: cipher.keySize(),
|
||||
}
|
||||
|
||||
parts := &aeadParts{
|
||||
iv: obj.iv,
|
||||
ciphertext: obj.ciphertext,
|
||||
tag: obj.tag,
|
||||
}
|
||||
|
||||
authData := obj.computeAuthData()
|
||||
|
||||
var plaintext []byte
|
||||
recipient := obj.recipients[0]
|
||||
recipientHeaders := obj.mergedHeaders(&recipient)
|
||||
|
||||
cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
|
||||
if err == nil {
|
||||
// Found a valid CEK -- let's try to decrypt.
|
||||
plaintext, err = cipher.decrypt(cek, authData, parts)
|
||||
}
|
||||
|
||||
if plaintext == nil {
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
// The "zip" header parameter may only be present in the protected header.
|
||||
if comp := obj.protected.getCompression(); comp != "" {
|
||||
plaintext, err = decompress(comp, plaintext)
|
||||
}
|
||||
|
||||
return plaintext, err
|
||||
}
|
||||
|
||||
// DecryptMulti decrypts and validates the object and returns the plaintexts,
|
||||
// with support for multiple recipients. It returns the index of the recipient
|
||||
// for which the decryption was successful, the merged headers for that recipient,
|
||||
// and the plaintext.
|
||||
func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
|
||||
globalHeaders := obj.mergedHeaders(nil)
|
||||
|
||||
critical, err := globalHeaders.getCritical()
|
||||
if err != nil {
|
||||
return -1, Header{}, nil, fmt.Errorf("square/go-jose: invalid crit header")
|
||||
}
|
||||
|
||||
if len(critical) > 0 {
|
||||
return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported crit header")
|
||||
}
|
||||
|
||||
decrypter, err := newDecrypter(decryptionKey)
|
||||
if err != nil {
|
||||
return -1, Header{}, nil, err
|
||||
}
|
||||
|
||||
encryption := globalHeaders.getEncryption()
|
||||
cipher := getContentCipher(encryption)
|
||||
if cipher == nil {
|
||||
return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(encryption))
|
||||
}
|
||||
|
||||
generator := randomKeyGenerator{
|
||||
size: cipher.keySize(),
|
||||
}
|
||||
|
||||
parts := &aeadParts{
|
||||
iv: obj.iv,
|
||||
ciphertext: obj.ciphertext,
|
||||
tag: obj.tag,
|
||||
}
|
||||
|
||||
authData := obj.computeAuthData()
|
||||
|
||||
index := -1
|
||||
var plaintext []byte
|
||||
var headers rawHeader
|
||||
|
||||
for i, recipient := range obj.recipients {
|
||||
recipientHeaders := obj.mergedHeaders(&recipient)
|
||||
|
||||
cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
|
||||
if err == nil {
|
||||
// Found a valid CEK -- let's try to decrypt.
|
||||
plaintext, err = cipher.decrypt(cek, authData, parts)
|
||||
if err == nil {
|
||||
index = i
|
||||
headers = recipientHeaders
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if plaintext == nil || err != nil {
|
||||
return -1, Header{}, nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
// The "zip" header parameter may only be present in the protected header.
|
||||
if comp := obj.protected.getCompression(); comp != "" {
|
||||
plaintext, err = decompress(comp, plaintext)
|
||||
}
|
||||
|
||||
sanitized, err := headers.sanitized()
|
||||
if err != nil {
|
||||
return -1, Header{}, nil, fmt.Errorf("square/go-jose: failed to sanitize header: %v", err)
|
||||
}
|
||||
|
||||
return index, sanitized, plaintext, err
|
||||
}
|
27 vendor/gopkg.in/square/go-jose.v2/doc.go (generated, vendored, new file)
@@ -0,0 +1,27 @@
/*-
 * Copyright 2014 Square Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*

Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. It implements encryption and signing based on
the JSON Web Encryption and JSON Web Signature standards, with optional JSON
Web Token support available in a sub-package. The library supports both the
compact and full serialization formats, and has optional support for multiple
recipients.

*/
package jose
|
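Editor's note: the package documentation above mentions encryption with both compact and full serialization. A minimal, illustrative round trip with go-jose v2 might look like the sketch below. It is not part of the vendored code; the key size and algorithm choices are arbitrary examples.

	package main

	import (
		"crypto/rand"
		"crypto/rsa"
		"fmt"

		jose "gopkg.in/square/go-jose.v2"
	)

	func main() {
		priv, err := rsa.GenerateKey(rand.Reader, 2048)
		if err != nil {
			panic(err)
		}

		enc, err := jose.NewEncrypter(
			jose.A128GCM, // content encryption ("enc" header)
			jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &priv.PublicKey}, // key wrapping ("alg" header)
			nil,
		)
		if err != nil {
			panic(err)
		}

		obj, err := enc.Encrypt([]byte("hello JOSE"))
		if err != nil {
			panic(err)
		}

		compact, _ := obj.CompactSerialize() // five dot-separated base64url parts
		fmt.Println(compact)

		// Round trip: parse and decrypt with the private key.
		parsed, _ := jose.ParseEncrypted(compact)
		plaintext, _ := parsed.Decrypt(priv)
		fmt.Printf("%s\n", plaintext)
	}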
178 vendor/gopkg.in/square/go-jose.v2/encoding.go (generated, vendored, new file)
@@ -0,0 +1,178 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"math/big"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var stripWhitespaceRegex = regexp.MustCompile("\\s")
|
||||
|
||||
// Helper function to serialize known-good objects.
|
||||
// Precondition: value is not a nil pointer.
|
||||
func mustSerializeJSON(value interface{}) []byte {
|
||||
out, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// We never want to serialize the top-level value "null," since it's not a
|
||||
// valid JOSE message. But if a caller passes in a nil pointer to this method,
|
||||
// MarshalJSON will happily serialize it as the top-level value "null". If
|
||||
// that value is then embedded in another operation, for instance by being
|
||||
// base64-encoded and fed as input to a signing algorithm
|
||||
// (https://github.com/square/go-jose/issues/22), the result will be
|
||||
// incorrect. Because this method is intended for known-good objects, and a nil
|
||||
// pointer is not a known-good object, we are free to panic in this case.
|
||||
// Note: It's not possible to directly check whether the data pointed at by an
|
||||
// interface is a nil pointer, so we do this hacky workaround.
|
||||
// https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
|
||||
if string(out) == "null" {
|
||||
panic("Tried to serialize a nil pointer.")
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Strip all newlines and whitespace
|
||||
func stripWhitespace(data string) string {
|
||||
return stripWhitespaceRegex.ReplaceAllString(data, "")
|
||||
}
|
||||
|
||||
// Perform compression based on algorithm
|
||||
func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
|
||||
switch algorithm {
|
||||
case DEFLATE:
|
||||
return deflate(input)
|
||||
default:
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
}
|
||||
|
||||
// Perform decompression based on algorithm
|
||||
func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
|
||||
switch algorithm {
|
||||
case DEFLATE:
|
||||
return inflate(input)
|
||||
default:
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
}
|
||||
|
||||
// Compress with DEFLATE
|
||||
func deflate(input []byte) ([]byte, error) {
|
||||
output := new(bytes.Buffer)
|
||||
|
||||
// Writing to byte buffer, err is always nil
|
||||
writer, _ := flate.NewWriter(output, 1)
|
||||
_, _ = io.Copy(writer, bytes.NewBuffer(input))
|
||||
|
||||
err := writer.Close()
|
||||
return output.Bytes(), err
|
||||
}
|
||||
|
||||
// Decompress with DEFLATE
|
||||
func inflate(input []byte) ([]byte, error) {
|
||||
output := new(bytes.Buffer)
|
||||
reader := flate.NewReader(bytes.NewBuffer(input))
|
||||
|
||||
_, err := io.Copy(output, reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = reader.Close()
|
||||
return output.Bytes(), err
|
||||
}
|
||||
|
||||
// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
|
||||
type byteBuffer struct {
|
||||
data []byte
|
||||
}
|
||||
|
||||
func newBuffer(data []byte) *byteBuffer {
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
return &byteBuffer{
|
||||
data: data,
|
||||
}
|
||||
}
|
||||
|
||||
func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
|
||||
if len(data) > length {
|
||||
panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
|
||||
}
|
||||
pad := make([]byte, length-len(data))
|
||||
return newBuffer(append(pad, data...))
|
||||
}
|
||||
|
||||
func newBufferFromInt(num uint64) *byteBuffer {
|
||||
data := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(data, num)
|
||||
return newBuffer(bytes.TrimLeft(data, "\x00"))
|
||||
}
|
||||
|
||||
func (b *byteBuffer) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(b.base64())
|
||||
}
|
||||
|
||||
func (b *byteBuffer) UnmarshalJSON(data []byte) error {
|
||||
var encoded string
|
||||
err := json.Unmarshal(data, &encoded)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if encoded == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
decoded, err := base64.RawURLEncoding.DecodeString(encoded)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*b = *newBuffer(decoded)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *byteBuffer) base64() string {
|
||||
return base64.RawURLEncoding.EncodeToString(b.data)
|
||||
}
|
||||
|
||||
func (b *byteBuffer) bytes() []byte {
|
||||
// Handling nil here allows us to transparently handle nil slices when serializing.
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
return b.data
|
||||
}
|
||||
|
||||
func (b byteBuffer) bigInt() *big.Int {
|
||||
return new(big.Int).SetBytes(b.data)
|
||||
}
|
||||
|
||||
func (b byteBuffer) toInt() int {
|
||||
return int(b.bigInt().Int64())
|
||||
}
|
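Editor's note: the compress/decompress helpers above wrap DEFLATE from compress/flate. For reference, a standalone sketch of the same round trip using only the standard library (illustrative only, not part of the vendored code):

	package main

	import (
		"bytes"
		"compress/flate"
		"fmt"
		"io"
		"io/ioutil"
	)

	func main() {
		input := []byte(`{"msg":"compress me"}`)

		// Compress, as deflate() does above.
		var buf bytes.Buffer
		w, err := flate.NewWriter(&buf, flate.BestSpeed)
		if err != nil {
			panic(err)
		}
		_, _ = io.Copy(w, bytes.NewReader(input))
		_ = w.Close()

		// Decompress, as inflate() does above.
		r := flate.NewReader(bytes.NewReader(buf.Bytes()))
		out, err := ioutil.ReadAll(r)
		if err != nil {
			panic(err)
		}
		_ = r.Close()

		fmt.Println(bytes.Equal(input, out)) // true
	}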
27 vendor/gopkg.in/square/go-jose.v2/json/LICENSE (generated, vendored, new file)
@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1183 vendor/gopkg.in/square/go-jose.v2/json/decode.go (generated, vendored, new file; diff suppressed because it is too large)
1197 vendor/gopkg.in/square/go-jose.v2/json/encode.go (generated, vendored, new file; diff suppressed because it is too large)
141 vendor/gopkg.in/square/go-jose.v2/json/indent.go (generated, vendored, new file)
@@ -0,0 +1,141 @@
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import "bytes"
|
||||
|
||||
// Compact appends to dst the JSON-encoded src with
|
||||
// insignificant space characters elided.
|
||||
func Compact(dst *bytes.Buffer, src []byte) error {
|
||||
return compact(dst, src, false)
|
||||
}
|
||||
|
||||
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
|
||||
origLen := dst.Len()
|
||||
var scan scanner
|
||||
scan.reset()
|
||||
start := 0
|
||||
for i, c := range src {
|
||||
if escape && (c == '<' || c == '>' || c == '&') {
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
dst.WriteString(`\u00`)
|
||||
dst.WriteByte(hex[c>>4])
|
||||
dst.WriteByte(hex[c&0xF])
|
||||
start = i + 1
|
||||
}
|
||||
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
|
||||
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
dst.WriteString(`\u202`)
|
||||
dst.WriteByte(hex[src[i+2]&0xF])
|
||||
start = i + 3
|
||||
}
|
||||
v := scan.step(&scan, c)
|
||||
if v >= scanSkipSpace {
|
||||
if v == scanError {
|
||||
break
|
||||
}
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
dst.Truncate(origLen)
|
||||
return scan.err
|
||||
}
|
||||
if start < len(src) {
|
||||
dst.Write(src[start:])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
|
||||
dst.WriteByte('\n')
|
||||
dst.WriteString(prefix)
|
||||
for i := 0; i < depth; i++ {
|
||||
dst.WriteString(indent)
|
||||
}
|
||||
}
|
||||
|
||||
// Indent appends to dst an indented form of the JSON-encoded src.
|
||||
// Each element in a JSON object or array begins on a new,
|
||||
// indented line beginning with prefix followed by one or more
|
||||
// copies of indent according to the indentation nesting.
|
||||
// The data appended to dst does not begin with the prefix nor
|
||||
// any indentation, to make it easier to embed inside other formatted JSON data.
|
||||
// Although leading space characters (space, tab, carriage return, newline)
|
||||
// at the beginning of src are dropped, trailing space characters
|
||||
// at the end of src are preserved and copied to dst.
|
||||
// For example, if src has no trailing spaces, neither will dst;
|
||||
// if src ends in a trailing newline, so will dst.
|
||||
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
|
||||
origLen := dst.Len()
|
||||
var scan scanner
|
||||
scan.reset()
|
||||
needIndent := false
|
||||
depth := 0
|
||||
for _, c := range src {
|
||||
scan.bytes++
|
||||
v := scan.step(&scan, c)
|
||||
if v == scanSkipSpace {
|
||||
continue
|
||||
}
|
||||
if v == scanError {
|
||||
break
|
||||
}
|
||||
if needIndent && v != scanEndObject && v != scanEndArray {
|
||||
needIndent = false
|
||||
depth++
|
||||
newline(dst, prefix, indent, depth)
|
||||
}
|
||||
|
||||
// Emit semantically uninteresting bytes
|
||||
// (in particular, punctuation in strings) unmodified.
|
||||
if v == scanContinue {
|
||||
dst.WriteByte(c)
|
||||
continue
|
||||
}
|
||||
|
||||
// Add spacing around real punctuation.
|
||||
switch c {
|
||||
case '{', '[':
|
||||
// delay indent so that empty object and array are formatted as {} and [].
|
||||
needIndent = true
|
||||
dst.WriteByte(c)
|
||||
|
||||
case ',':
|
||||
dst.WriteByte(c)
|
||||
newline(dst, prefix, indent, depth)
|
||||
|
||||
case ':':
|
||||
dst.WriteByte(c)
|
||||
dst.WriteByte(' ')
|
||||
|
||||
case '}', ']':
|
||||
if needIndent {
|
||||
// suppress indent in empty object/array
|
||||
needIndent = false
|
||||
} else {
|
||||
depth--
|
||||
newline(dst, prefix, indent, depth)
|
||||
}
|
||||
dst.WriteByte(c)
|
||||
|
||||
default:
|
||||
dst.WriteByte(c)
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
dst.Truncate(origLen)
|
||||
return scan.err
|
||||
}
|
||||
return nil
|
||||
}
|
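Editor's note: Compact and Indent above mirror the standard library's encoding/json functions of the same name (this vendored json package is a fork of it). A quick sketch of the behavior using the stdlib versions, for illustration only:

	package main

	import (
		"bytes"
		"encoding/json"
		"fmt"
	)

	func main() {
		src := []byte(`{ "alg": "RSA-OAEP",   "enc": "A128GCM" }`)

		// Compact: strip insignificant whitespace.
		var compacted bytes.Buffer
		if err := json.Compact(&compacted, src); err != nil {
			panic(err)
		}
		fmt.Println(compacted.String()) // {"alg":"RSA-OAEP","enc":"A128GCM"}

		// Indent: pretty-print with a two-space indent.
		var indented bytes.Buffer
		if err := json.Indent(&indented, src, "", "  "); err != nil {
			panic(err)
		}
		fmt.Println(indented.String())
	}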
623 vendor/gopkg.in/square/go-jose.v2/json/scanner.go (generated, vendored, new file)
@@ -0,0 +1,623 @@
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
// JSON value parser state machine.
|
||||
// Just about at the limit of what is reasonable to write by hand.
|
||||
// Some parts are a bit tedious, but overall it nicely factors out the
|
||||
// otherwise common code from the multiple scanning functions
|
||||
// in this package (Compact, Indent, checkValid, nextValue, etc).
|
||||
//
|
||||
// This file starts with two simple examples using the scanner
|
||||
// before diving into the scanner itself.
|
||||
|
||||
import "strconv"
|
||||
|
||||
// checkValid verifies that data is valid JSON-encoded data.
|
||||
// scan is passed in for use by checkValid to avoid an allocation.
|
||||
func checkValid(data []byte, scan *scanner) error {
|
||||
scan.reset()
|
||||
for _, c := range data {
|
||||
scan.bytes++
|
||||
if scan.step(scan, c) == scanError {
|
||||
return scan.err
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
return scan.err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextValue splits data after the next whole JSON value,
|
||||
// returning that value and the bytes that follow it as separate slices.
|
||||
// scan is passed in for use by nextValue to avoid an allocation.
|
||||
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
|
||||
scan.reset()
|
||||
for i, c := range data {
|
||||
v := scan.step(scan, c)
|
||||
if v >= scanEndObject {
|
||||
switch v {
|
||||
// probe the scanner with a space to determine whether we will
|
||||
// get scanEnd on the next character. Otherwise, if the next character
|
||||
// is not a space, scanEndTop allocates a needless error.
|
||||
case scanEndObject, scanEndArray:
|
||||
if scan.step(scan, ' ') == scanEnd {
|
||||
return data[:i+1], data[i+1:], nil
|
||||
}
|
||||
case scanError:
|
||||
return nil, nil, scan.err
|
||||
case scanEnd:
|
||||
return data[:i], data[i:], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
return nil, nil, scan.err
|
||||
}
|
||||
return data, nil, nil
|
||||
}
|
||||
|
||||
// A SyntaxError is a description of a JSON syntax error.
|
||||
type SyntaxError struct {
|
||||
msg string // description of error
|
||||
Offset int64 // error occurred after reading Offset bytes
|
||||
}
|
||||
|
||||
func (e *SyntaxError) Error() string { return e.msg }
|
||||
|
||||
// A scanner is a JSON scanning state machine.
|
||||
// Callers call scan.reset() and then pass bytes in one at a time
|
||||
// by calling scan.step(&scan, c) for each byte.
|
||||
// The return value, referred to as an opcode, tells the
|
||||
// caller about significant parsing events like beginning
|
||||
// and ending literals, objects, and arrays, so that the
|
||||
// caller can follow along if it wishes.
|
||||
// The return value scanEnd indicates that a single top-level
|
||||
// JSON value has been completed, *before* the byte that
|
||||
// just got passed in. (The indication must be delayed in order
|
||||
// to recognize the end of numbers: is 123 a whole value or
|
||||
// the beginning of 12345e+6?).
|
||||
type scanner struct {
|
||||
// The step is a func to be called to execute the next transition.
|
||||
// Also tried using an integer constant and a single func
|
||||
// with a switch, but using the func directly was 10% faster
|
||||
// on a 64-bit Mac Mini, and it's nicer to read.
|
||||
step func(*scanner, byte) int
|
||||
|
||||
// Reached end of top-level value.
|
||||
endTop bool
|
||||
|
||||
// Stack of what we're in the middle of - array values, object keys, object values.
|
||||
parseState []int
|
||||
|
||||
// Error that happened, if any.
|
||||
err error
|
||||
|
||||
// 1-byte redo (see undo method)
|
||||
redo bool
|
||||
redoCode int
|
||||
redoState func(*scanner, byte) int
|
||||
|
||||
// total bytes consumed, updated by decoder.Decode
|
||||
bytes int64
|
||||
}
|
||||
|
||||
// These values are returned by the state transition functions
|
||||
// assigned to scanner.state and the method scanner.eof.
|
||||
// They give details about the current state of the scan that
|
||||
// callers might be interested to know about.
|
||||
// It is okay to ignore the return value of any particular
|
||||
// call to scanner.state: if one call returns scanError,
|
||||
// every subsequent call will return scanError too.
|
||||
const (
|
||||
// Continue.
|
||||
scanContinue = iota // uninteresting byte
|
||||
scanBeginLiteral // end implied by next result != scanContinue
|
||||
scanBeginObject // begin object
|
||||
scanObjectKey // just finished object key (string)
|
||||
scanObjectValue // just finished non-last object value
|
||||
scanEndObject // end object (implies scanObjectValue if possible)
|
||||
scanBeginArray // begin array
|
||||
scanArrayValue // just finished array value
|
||||
scanEndArray // end array (implies scanArrayValue if possible)
|
||||
scanSkipSpace // space byte; can skip; known to be last "continue" result
|
||||
|
||||
// Stop.
|
||||
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
|
||||
scanError // hit an error, scanner.err.
|
||||
)
|
||||
|
||||
// These values are stored in the parseState stack.
|
||||
// They give the current state of a composite value
|
||||
// being scanned. If the parser is inside a nested value
|
||||
// the parseState describes the nested state, outermost at entry 0.
|
||||
const (
|
||||
parseObjectKey = iota // parsing object key (before colon)
|
||||
parseObjectValue // parsing object value (after colon)
|
||||
parseArrayValue // parsing array value
|
||||
)
|
||||
|
||||
// reset prepares the scanner for use.
|
||||
// It must be called before calling s.step.
|
||||
func (s *scanner) reset() {
|
||||
s.step = stateBeginValue
|
||||
s.parseState = s.parseState[0:0]
|
||||
s.err = nil
|
||||
s.redo = false
|
||||
s.endTop = false
|
||||
}
|
||||
|
||||
// eof tells the scanner that the end of input has been reached.
|
||||
// It returns a scan status just as s.step does.
|
||||
func (s *scanner) eof() int {
|
||||
if s.err != nil {
|
||||
return scanError
|
||||
}
|
||||
if s.endTop {
|
||||
return scanEnd
|
||||
}
|
||||
s.step(s, ' ')
|
||||
if s.endTop {
|
||||
return scanEnd
|
||||
}
|
||||
if s.err == nil {
|
||||
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
|
||||
}
|
||||
return scanError
|
||||
}
|
||||
|
||||
// pushParseState pushes a new parse state p onto the parse stack.
|
||||
func (s *scanner) pushParseState(p int) {
|
||||
s.parseState = append(s.parseState, p)
|
||||
}
|
||||
|
||||
// popParseState pops a parse state (already obtained) off the stack
|
||||
// and updates s.step accordingly.
|
||||
func (s *scanner) popParseState() {
|
||||
n := len(s.parseState) - 1
|
||||
s.parseState = s.parseState[0:n]
|
||||
s.redo = false
|
||||
if n == 0 {
|
||||
s.step = stateEndTop
|
||||
s.endTop = true
|
||||
} else {
|
||||
s.step = stateEndValue
|
||||
}
|
||||
}
|
||||
|
||||
func isSpace(c byte) bool {
|
||||
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
|
||||
}
|
||||
|
||||
// stateBeginValueOrEmpty is the state after reading `[`.
|
||||
func stateBeginValueOrEmpty(s *scanner, c byte) int {
|
||||
if c <= ' ' && isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == ']' {
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
return stateBeginValue(s, c)
|
||||
}
|
||||
|
||||
// stateBeginValue is the state at the beginning of the input.
|
||||
func stateBeginValue(s *scanner, c byte) int {
|
||||
if c <= ' ' && isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
switch c {
|
||||
case '{':
|
||||
s.step = stateBeginStringOrEmpty
|
||||
s.pushParseState(parseObjectKey)
|
||||
return scanBeginObject
|
||||
case '[':
|
||||
s.step = stateBeginValueOrEmpty
|
||||
s.pushParseState(parseArrayValue)
|
||||
return scanBeginArray
|
||||
case '"':
|
||||
s.step = stateInString
|
||||
return scanBeginLiteral
|
||||
case '-':
|
||||
s.step = stateNeg
|
||||
return scanBeginLiteral
|
||||
case '0': // beginning of 0.123
|
||||
s.step = state0
|
||||
return scanBeginLiteral
|
||||
case 't': // beginning of true
|
||||
s.step = stateT
|
||||
return scanBeginLiteral
|
||||
case 'f': // beginning of false
|
||||
s.step = stateF
|
||||
return scanBeginLiteral
|
||||
case 'n': // beginning of null
|
||||
s.step = stateN
|
||||
return scanBeginLiteral
|
||||
}
|
||||
if '1' <= c && c <= '9' { // beginning of 1234.5
|
||||
s.step = state1
|
||||
return scanBeginLiteral
|
||||
}
|
||||
return s.error(c, "looking for beginning of value")
|
||||
}
|
||||
|
||||
// stateBeginStringOrEmpty is the state after reading `{`.
|
||||
func stateBeginStringOrEmpty(s *scanner, c byte) int {
|
||||
if c <= ' ' && isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == '}' {
|
||||
n := len(s.parseState)
|
||||
s.parseState[n-1] = parseObjectValue
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
return stateBeginString(s, c)
|
||||
}
|
||||
|
||||
// stateBeginString is the state after reading `{"key": value,`.
|
||||
func stateBeginString(s *scanner, c byte) int {
|
||||
if c <= ' ' && isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == '"' {
|
||||
s.step = stateInString
|
||||
return scanBeginLiteral
|
||||
}
|
||||
return s.error(c, "looking for beginning of object key string")
|
||||
}
|
||||
|
||||
// stateEndValue is the state after completing a value,
|
||||
// such as after reading `{}` or `true` or `["x"`.
|
||||
func stateEndValue(s *scanner, c byte) int {
|
||||
n := len(s.parseState)
|
||||
if n == 0 {
|
||||
// Completed top-level before the current byte.
|
||||
s.step = stateEndTop
|
||||
s.endTop = true
|
||||
return stateEndTop(s, c)
|
||||
}
|
||||
if c <= ' ' && isSpace(c) {
|
||||
s.step = stateEndValue
|
||||
return scanSkipSpace
|
||||
}
|
||||
ps := s.parseState[n-1]
|
||||
switch ps {
|
||||
case parseObjectKey:
|
||||
if c == ':' {
|
||||
s.parseState[n-1] = parseObjectValue
|
||||
s.step = stateBeginValue
|
||||
return scanObjectKey
|
||||
}
|
||||
return s.error(c, "after object key")
|
||||
case parseObjectValue:
|
||||
if c == ',' {
|
||||
s.parseState[n-1] = parseObjectKey
|
||||
s.step = stateBeginString
|
||||
return scanObjectValue
|
||||
}
|
||||
if c == '}' {
|
||||
s.popParseState()
|
||||
return scanEndObject
|
||||
}
|
||||
return s.error(c, "after object key:value pair")
|
||||
case parseArrayValue:
|
||||
if c == ',' {
|
||||
s.step = stateBeginValue
|
||||
return scanArrayValue
|
||||
}
|
||||
if c == ']' {
|
||||
s.popParseState()
|
||||
return scanEndArray
|
||||
}
|
||||
return s.error(c, "after array element")
|
||||
}
|
||||
return s.error(c, "")
|
||||
}
|
||||
|
||||
// stateEndTop is the state after finishing the top-level value,
|
||||
// such as after reading `{}` or `[1,2,3]`.
|
||||
// Only space characters should be seen now.
|
||||
func stateEndTop(s *scanner, c byte) int {
|
||||
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
|
||||
// Complain about non-space byte on next call.
|
||||
s.error(c, "after top-level value")
|
||||
}
|
||||
return scanEnd
|
||||
}
|
||||
|
||||
// stateInString is the state after reading `"`.
|
||||
func stateInString(s *scanner, c byte) int {
|
||||
if c == '"' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
if c == '\\' {
|
||||
s.step = stateInStringEsc
|
||||
return scanContinue
|
||||
}
|
||||
if c < 0x20 {
|
||||
return s.error(c, "in string literal")
|
||||
}
|
||||
return scanContinue
|
||||
}
|
||||
|
||||
// stateInStringEsc is the state after reading `"\` during a quoted string.
|
||||
func stateInStringEsc(s *scanner, c byte) int {
|
||||
switch c {
|
||||
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
|
||||
s.step = stateInString
|
||||
return scanContinue
|
||||
case 'u':
|
||||
s.step = stateInStringEscU
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in string escape code")
|
||||
}
|
||||
|
||||
// stateInStringEscU is the state after reading `"\u` during a quoted string.
|
||||
func stateInStringEscU(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU1
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
|
||||
func stateInStringEscU1(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU12
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
|
||||
func stateInStringEscU12(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU123
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
|
||||
func stateInStringEscU123(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInString
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateNeg is the state after reading `-` during a number.
|
||||
func stateNeg(s *scanner, c byte) int {
|
||||
if c == '0' {
|
||||
s.step = state0
|
||||
return scanContinue
|
||||
}
|
||||
if '1' <= c && c <= '9' {
|
||||
s.step = state1
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in numeric literal")
|
||||
}
|
||||
|
||||
// state1 is the state after reading a non-zero integer during a number,
|
||||
// such as after reading `1` or `100` but not `0`.
|
||||
func state1(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = state1
|
||||
return scanContinue
|
||||
}
|
||||
return state0(s, c)
|
||||
}
|
||||
|
||||
// state0 is the state after reading `0` during a number.
|
||||
func state0(s *scanner, c byte) int {
|
||||
if c == '.' {
|
||||
s.step = stateDot
|
||||
return scanContinue
|
||||
}
|
||||
if c == 'e' || c == 'E' {
|
||||
s.step = stateE
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateDot is the state after reading the integer and decimal point in a number,
|
||||
// such as after reading `1.`.
|
||||
func stateDot(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = stateDot0
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "after decimal point in numeric literal")
|
||||
}
|
||||
|
||||
// stateDot0 is the state after reading the integer, decimal point, and subsequent
|
||||
// digits of a number, such as after reading `3.14`.
|
||||
func stateDot0(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
return scanContinue
|
||||
}
|
||||
if c == 'e' || c == 'E' {
|
||||
s.step = stateE
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateE is the state after reading the mantissa and e in a number,
|
||||
// such as after reading `314e` or `0.314e`.
|
||||
func stateE(s *scanner, c byte) int {
|
||||
if c == '+' || c == '-' {
|
||||
s.step = stateESign
|
||||
return scanContinue
|
||||
}
|
||||
return stateESign(s, c)
|
||||
}
|
||||
|
||||
// stateESign is the state after reading the mantissa, e, and sign in a number,
|
||||
// such as after reading `314e-` or `0.314e+`.
|
||||
func stateESign(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = stateE0
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in exponent of numeric literal")
|
||||
}
|
||||
|
||||
// stateE0 is the state after reading the mantissa, e, optional sign,
|
||||
// and at least one digit of the exponent in a number,
|
||||
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
|
||||
func stateE0(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateT is the state after reading `t`.
|
||||
func stateT(s *scanner, c byte) int {
|
||||
if c == 'r' {
|
||||
s.step = stateTr
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'r')")
|
||||
}
|
||||
|
||||
// stateTr is the state after reading `tr`.
|
||||
func stateTr(s *scanner, c byte) int {
|
||||
if c == 'u' {
|
||||
s.step = stateTru
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'u')")
|
||||
}
|
||||
|
||||
// stateTru is the state after reading `tru`.
|
||||
func stateTru(s *scanner, c byte) int {
|
||||
if c == 'e' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'e')")
|
||||
}
|
||||
|
||||
// stateF is the state after reading `f`.
|
||||
func stateF(s *scanner, c byte) int {
|
||||
if c == 'a' {
|
||||
s.step = stateFa
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'a')")
|
||||
}
|
||||
|
||||
// stateFa is the state after reading `fa`.
|
||||
func stateFa(s *scanner, c byte) int {
|
||||
if c == 'l' {
|
||||
s.step = stateFal
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateFal is the state after reading `fal`.
|
||||
func stateFal(s *scanner, c byte) int {
|
||||
if c == 's' {
|
||||
s.step = stateFals
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 's')")
|
||||
}
|
||||
|
||||
// stateFals is the state after reading `fals`.
|
||||
func stateFals(s *scanner, c byte) int {
|
||||
if c == 'e' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'e')")
|
||||
}
|
||||
|
||||
// stateN is the state after reading `n`.
|
||||
func stateN(s *scanner, c byte) int {
|
||||
if c == 'u' {
|
||||
s.step = stateNu
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'u')")
|
||||
}
|
||||
|
||||
// stateNu is the state after reading `nu`.
|
||||
func stateNu(s *scanner, c byte) int {
|
||||
if c == 'l' {
|
||||
s.step = stateNul
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateNul is the state after reading `nul`.
|
||||
func stateNul(s *scanner, c byte) int {
|
||||
if c == 'l' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateError is the state after reaching a syntax error,
|
||||
// such as after reading `[1}` or `5.1.2`.
|
||||
func stateError(s *scanner, c byte) int {
|
||||
return scanError
|
||||
}
|
||||
|
||||
// error records an error and switches to the error state.
|
||||
func (s *scanner) error(c byte, context string) int {
|
||||
s.step = stateError
|
||||
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
|
||||
return scanError
|
||||
}
|
||||
|
||||
// quoteChar formats c as a quoted character literal
|
||||
func quoteChar(c byte) string {
|
||||
// special cases - different from quoted strings
|
||||
if c == '\'' {
|
||||
return `'\''`
|
||||
}
|
||||
if c == '"' {
|
||||
return `'"'`
|
||||
}
|
||||
|
||||
// use quoted string with different quotation marks
|
||||
s := strconv.Quote(string(c))
|
||||
return "'" + s[1:len(s)-1] + "'"
|
||||
}
|
||||
|
||||
// undo causes the scanner to return scanCode from the next state transition.
|
||||
// This gives callers a simple 1-byte undo mechanism.
|
||||
func (s *scanner) undo(scanCode int) {
|
||||
if s.redo {
|
||||
panic("json: invalid use of scanner")
|
||||
}
|
||||
s.redoCode = scanCode
|
||||
s.redoState = s.step
|
||||
s.step = stateRedo
|
||||
s.redo = true
|
||||
}
|
||||
|
||||
// stateRedo helps implement the scanner's 1-byte undo.
|
||||
func stateRedo(s *scanner, c byte) int {
|
||||
s.redo = false
|
||||
s.step = s.redoState
|
||||
return s.redoCode
|
||||
}
|
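Editor's note: the scanner above is what checkValid drives byte by byte. The standard library exposes the same validation as json.Valid, which this sketch uses for illustration; the sample inputs echo the `[1}` and `5.1.2` examples from the comments above.

	package main

	import (
		"encoding/json"
		"fmt"
	)

	func main() {
		fmt.Println(json.Valid([]byte(`{"protected":"eyJhbGciOiJSUzI1NiJ9"}`))) // true
		fmt.Println(json.Valid([]byte(`[1}`)))                                   // false: mismatched delimiter
		fmt.Println(json.Valid([]byte(`5.1.2`)))                                 // false: malformed number
	}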
480 vendor/gopkg.in/square/go-jose.v2/json/stream.go (generated, vendored, new file)
@@ -0,0 +1,480 @@
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// A Decoder reads and decodes JSON objects from an input stream.
|
||||
type Decoder struct {
|
||||
r io.Reader
|
||||
buf []byte
|
||||
d decodeState
|
||||
scanp int // start of unread data in buf
|
||||
scan scanner
|
||||
err error
|
||||
|
||||
tokenState int
|
||||
tokenStack []int
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder that reads from r.
|
||||
//
|
||||
// The decoder introduces its own buffering and may
|
||||
// read data from r beyond the JSON values requested.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{r: r}
|
||||
}
|
||||
|
||||
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
|
||||
// Number instead of as a float64.
|
||||
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
|
||||
|
||||
// Decode reads the next JSON-encoded value from its
|
||||
// input and stores it in the value pointed to by v.
|
||||
//
|
||||
// See the documentation for Unmarshal for details about
|
||||
// the conversion of JSON into a Go value.
|
||||
func (dec *Decoder) Decode(v interface{}) error {
|
||||
if dec.err != nil {
|
||||
return dec.err
|
||||
}
|
||||
|
||||
if err := dec.tokenPrepareForDecode(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !dec.tokenValueAllowed() {
|
||||
return &SyntaxError{msg: "not at beginning of value"}
|
||||
}
|
||||
|
||||
// Read whole value into buffer.
|
||||
n, err := dec.readValue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
|
||||
dec.scanp += n
|
||||
|
||||
// Don't save err from unmarshal into dec.err:
|
||||
// the connection is still usable since we read a complete JSON
|
||||
// object from it before the error happened.
|
||||
err = dec.d.unmarshal(v)
|
||||
|
||||
// fixup token streaming state
|
||||
dec.tokenValueEnd()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Buffered returns a reader of the data remaining in the Decoder's
|
||||
// buffer. The reader is valid until the next call to Decode.
|
||||
func (dec *Decoder) Buffered() io.Reader {
|
||||
return bytes.NewReader(dec.buf[dec.scanp:])
|
||||
}
|
||||
|
||||
// readValue reads a JSON value into dec.buf.
|
||||
// It returns the length of the encoding.
|
||||
func (dec *Decoder) readValue() (int, error) {
|
||||
dec.scan.reset()
|
||||
|
||||
scanp := dec.scanp
|
||||
var err error
|
||||
Input:
|
||||
for {
|
||||
// Look in the buffer for a new value.
|
||||
for i, c := range dec.buf[scanp:] {
|
||||
dec.scan.bytes++
|
||||
v := dec.scan.step(&dec.scan, c)
|
||||
if v == scanEnd {
|
||||
scanp += i
|
||||
break Input
|
||||
}
|
||||
// scanEnd is delayed one byte.
|
||||
// We might block trying to get that byte from src,
|
||||
// so instead invent a space byte.
|
||||
if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
|
||||
scanp += i + 1
|
||||
break Input
|
||||
}
|
||||
if v == scanError {
|
||||
dec.err = dec.scan.err
|
||||
return 0, dec.scan.err
|
||||
}
|
||||
}
|
||||
scanp = len(dec.buf)
|
||||
|
||||
// Did the last read have an error?
|
||||
// Delayed until now to allow buffer scan.
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
if dec.scan.step(&dec.scan, ' ') == scanEnd {
|
||||
break Input
|
||||
}
|
||||
if nonSpace(dec.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}
|
||||
dec.err = err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
n := scanp - dec.scanp
|
||||
err = dec.refill()
|
||||
scanp = dec.scanp + n
|
||||
}
|
||||
return scanp - dec.scanp, nil
|
||||
}
|
||||
|
||||
func (dec *Decoder) refill() error {
|
||||
// Make room to read more into the buffer.
|
||||
// First slide down data already consumed.
|
||||
if dec.scanp > 0 {
|
||||
n := copy(dec.buf, dec.buf[dec.scanp:])
|
||||
dec.buf = dec.buf[:n]
|
||||
dec.scanp = 0
|
||||
}
|
||||
|
||||
// Grow buffer if not large enough.
|
||||
const minRead = 512
|
||||
if cap(dec.buf)-len(dec.buf) < minRead {
|
||||
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
|
||||
copy(newBuf, dec.buf)
|
||||
dec.buf = newBuf
|
||||
}
|
||||
|
||||
// Read. Delay error for next iteration (after scan).
|
||||
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
|
||||
dec.buf = dec.buf[0 : len(dec.buf)+n]
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func nonSpace(b []byte) bool {
|
||||
for _, c := range b {
|
||||
if !isSpace(c) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// An Encoder writes JSON objects to an output stream.
|
||||
type Encoder struct {
|
||||
w io.Writer
|
||||
err error
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that writes to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{w: w}
|
||||
}
|
||||
|
||||
// Encode writes the JSON encoding of v to the stream,
|
||||
// followed by a newline character.
|
||||
//
|
||||
// See the documentation for Marshal for details about the
|
||||
// conversion of Go values to JSON.
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
if enc.err != nil {
|
||||
return enc.err
|
||||
}
|
||||
e := newEncodeState()
|
||||
err := e.marshal(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Terminate each value with a newline.
|
||||
// This makes the output look a little nicer
|
||||
// when debugging, and some kind of space
|
||||
// is required if the encoded value was a number,
|
||||
// so that the reader knows there aren't more
|
||||
// digits coming.
|
||||
e.WriteByte('\n')
|
||||
|
||||
if _, err = enc.w.Write(e.Bytes()); err != nil {
|
||||
enc.err = err
|
||||
}
|
||||
encodeStatePool.Put(e)
|
||||
return err
|
||||
}
|
||||
|
||||
// RawMessage is a raw encoded JSON object.
|
||||
// It implements Marshaler and Unmarshaler and can
|
||||
// be used to delay JSON decoding or precompute a JSON encoding.
|
||||
type RawMessage []byte
|
||||
|
||||
// MarshalJSON returns *m as the JSON encoding of m.
|
||||
func (m *RawMessage) MarshalJSON() ([]byte, error) {
|
||||
return *m, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON sets *m to a copy of data.
|
||||
func (m *RawMessage) UnmarshalJSON(data []byte) error {
|
||||
if m == nil {
|
||||
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
|
||||
}
|
||||
*m = append((*m)[0:0], data...)
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ Marshaler = (*RawMessage)(nil)
|
||||
var _ Unmarshaler = (*RawMessage)(nil)
|
||||
|
||||
// A Token holds a value of one of these types:
|
||||
//
|
||||
// Delim, for the four JSON delimiters [ ] { }
|
||||
// bool, for JSON booleans
|
||||
// float64, for JSON numbers
|
||||
// Number, for JSON numbers
|
||||
// string, for JSON string literals
|
||||
// nil, for JSON null
|
||||
//
|
||||
type Token interface{}
|
||||
|
||||
const (
|
||||
tokenTopValue = iota
|
||||
tokenArrayStart
|
||||
tokenArrayValue
|
||||
tokenArrayComma
|
||||
tokenObjectStart
|
||||
tokenObjectKey
|
||||
tokenObjectColon
|
||||
tokenObjectValue
|
||||
tokenObjectComma
|
||||
)
|
||||
|
||||
// advance tokenstate from a separator state to a value state
|
||||
func (dec *Decoder) tokenPrepareForDecode() error {
|
||||
// Note: Not calling peek before switch, to avoid
|
||||
// putting peek into the standard Decode path.
|
||||
// peek is only called when using the Token API.
|
||||
switch dec.tokenState {
|
||||
case tokenArrayComma:
|
||||
c, err := dec.peek()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c != ',' {
|
||||
return &SyntaxError{"expected comma after array element", 0}
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenArrayValue
|
||||
case tokenObjectColon:
|
||||
c, err := dec.peek()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c != ':' {
|
||||
return &SyntaxError{"expected colon after object key", 0}
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenObjectValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dec *Decoder) tokenValueAllowed() bool {
|
||||
switch dec.tokenState {
|
||||
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (dec *Decoder) tokenValueEnd() {
|
||||
switch dec.tokenState {
|
||||
case tokenArrayStart, tokenArrayValue:
|
||||
dec.tokenState = tokenArrayComma
|
||||
case tokenObjectValue:
|
||||
dec.tokenState = tokenObjectComma
|
||||
}
|
||||
}
|
||||
|
||||
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
|
||||
type Delim rune
|
||||
|
||||
func (d Delim) String() string {
|
||||
return string(d)
|
||||
}
|
||||
|
||||
// Token returns the next JSON token in the input stream.
|
||||
// At the end of the input stream, Token returns nil, io.EOF.
|
||||
//
|
||||
// Token guarantees that the delimiters [ ] { } it returns are
|
||||
// properly nested and matched: if Token encounters an unexpected
|
||||
// delimiter in the input, it will return an error.
|
||||
//
|
||||
// The input stream consists of basic JSON values—bool, string,
|
||||
// number, and null—along with delimiters [ ] { } of type Delim
|
||||
// to mark the start and end of arrays and objects.
|
||||
// Commas and colons are elided.
|
||||
func (dec *Decoder) Token() (Token, error) {
|
||||
for {
|
||||
c, err := dec.peek()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch c {
|
||||
case '[':
|
||||
if !dec.tokenValueAllowed() {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
|
||||
dec.tokenState = tokenArrayStart
|
||||
return Delim('['), nil
|
||||
|
||||
case ']':
|
||||
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
|
||||
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
|
||||
dec.tokenValueEnd()
|
||||
return Delim(']'), nil
|
||||
|
||||
case '{':
|
||||
if !dec.tokenValueAllowed() {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
|
||||
dec.tokenState = tokenObjectStart
|
||||
return Delim('{'), nil
|
||||
|
||||
case '}':
|
||||
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
|
||||
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
|
||||
dec.tokenValueEnd()
|
||||
return Delim('}'), nil
|
||||
|
||||
case ':':
|
||||
if dec.tokenState != tokenObjectColon {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenObjectValue
|
||||
continue
|
||||
|
||||
case ',':
|
||||
if dec.tokenState == tokenArrayComma {
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenArrayValue
|
||||
continue
|
||||
}
|
||||
if dec.tokenState == tokenObjectComma {
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenObjectKey
|
||||
continue
|
||||
}
|
||||
return dec.tokenError(c)
|
||||
|
||||
case '"':
|
||||
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
|
||||
var x string
|
||||
old := dec.tokenState
|
||||
dec.tokenState = tokenTopValue
|
||||
err := dec.Decode(&x)
|
||||
dec.tokenState = old
|
||||
if err != nil {
|
||||
clearOffset(err)
|
||||
return nil, err
|
||||
}
|
||||
dec.tokenState = tokenObjectColon
|
||||
return x, nil
|
||||
}
|
||||
fallthrough
|
||||
|
||||
default:
|
||||
if !dec.tokenValueAllowed() {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
var x interface{}
|
||||
if err := dec.Decode(&x); err != nil {
|
||||
clearOffset(err)
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func clearOffset(err error) {
|
||||
if s, ok := err.(*SyntaxError); ok {
|
||||
s.Offset = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (dec *Decoder) tokenError(c byte) (Token, error) {
|
||||
var context string
|
||||
switch dec.tokenState {
|
||||
case tokenTopValue:
|
||||
context = " looking for beginning of value"
|
||||
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
||||
context = " looking for beginning of value"
|
||||
case tokenArrayComma:
|
||||
context = " after array element"
|
||||
case tokenObjectKey:
|
||||
context = " looking for beginning of object key string"
|
||||
case tokenObjectColon:
|
||||
context = " after object key"
|
||||
case tokenObjectComma:
|
||||
context = " after object key:value pair"
|
||||
}
|
||||
return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
|
||||
}
|
||||
|
||||
// More reports whether there is another element in the
|
||||
// current array or object being parsed.
|
||||
func (dec *Decoder) More() bool {
|
||||
c, err := dec.peek()
|
||||
return err == nil && c != ']' && c != '}'
|
||||
}
|
||||
|
||||
func (dec *Decoder) peek() (byte, error) {
|
||||
var err error
|
||||
for {
|
||||
for i := dec.scanp; i < len(dec.buf); i++ {
|
||||
c := dec.buf[i]
|
||||
if isSpace(c) {
|
||||
continue
|
||||
}
|
||||
dec.scanp = i
|
||||
return c, nil
|
||||
}
|
||||
// buffer has been scanned, now report any error
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = dec.refill()
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
TODO
|
||||
|
||||
// EncodeToken writes the given JSON token to the stream.
|
||||
// It returns an error if the delimiters [ ] { } are not properly used.
|
||||
//
|
||||
// EncodeToken does not call Flush, because usually it is part of
|
||||
// a larger operation such as Encode, and those will call Flush when finished.
|
||||
// Callers that create an Encoder and then invoke EncodeToken directly,
|
||||
// without using Encode, need to call Flush when finished to ensure that
|
||||
// the JSON is written to the underlying writer.
|
||||
func (e *Encoder) EncodeToken(t Token) error {
|
||||
...
|
||||
}
|
||||
|
||||
*/
|
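Editor's note: the Token and More methods documented above behave like their encoding/json counterparts, from which this package was forked. An illustrative sketch using the stdlib decoder (the sample JSON is made up):

	package main

	import (
		"encoding/json"
		"fmt"
		"strings"
	)

	func main() {
		dec := json.NewDecoder(strings.NewReader(`{"recipients":[{"kid":"a"},{"kid":"b"}]}`))

		// Walk the stream token by token: delimiters, keys, and values.
		for {
			tok, err := dec.Token()
			if err != nil {
				break // io.EOF once the stream is exhausted
			}
			fmt.Printf("%T %v\n", tok, tok)
		}
	}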
44 vendor/gopkg.in/square/go-jose.v2/json/tags.go (generated, vendored, new file)
@@ -0,0 +1,44 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"strings"
)

// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	if idx := strings.Index(tag, ","); idx != -1 {
		return tag[:idx], tagOptions(tag[idx+1:])
	}
	return tag, tagOptions("")
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
	if len(o) == 0 {
		return false
	}
	s := string(o)
	for s != "" {
		var next string
		i := strings.Index(s, ",")
		if i >= 0 {
			s, next = s[:i], s[i+1:]
		}
		if s == optionName {
			return true
		}
		s = next
	}
	return false
}
|
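Editor's note: parseTag and tagOptions.Contains above split a struct field's json tag into its name and options. A small sketch of the same split using only exported standard-library pieces; the example struct and field are hypothetical.

	package main

	import (
		"fmt"
		"reflect"
		"strings"
	)

	type example struct {
		KeyID string `json:"kid,omitempty"`
	}

	func main() {
		tag := reflect.TypeOf(example{}).Field(0).Tag.Get("json") // "kid,omitempty"

		// Equivalent of parseTag: split the name from the comma-separated options.
		name, opts := tag, ""
		if i := strings.Index(tag, ","); i != -1 {
			name, opts = tag[:i], tag[i+1:]
		}

		fmt.Println(name)                // kid
		fmt.Println(opts)                // omitempty
		fmt.Println(opts == "omitempty") // true, what Contains("omitempty") would report
	}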
291 vendor/gopkg.in/square/go-jose.v2/jwe.go (generated, vendored, new file)
@@ -0,0 +1,291 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// rawJSONWebEncryption represents a raw JWE JSON object. Used for parsing/serializing.
|
||||
type rawJSONWebEncryption struct {
|
||||
Protected *byteBuffer `json:"protected,omitempty"`
|
||||
Unprotected *rawHeader `json:"unprotected,omitempty"`
|
||||
Header *rawHeader `json:"header,omitempty"`
|
||||
Recipients []rawRecipientInfo `json:"recipients,omitempty"`
|
||||
Aad *byteBuffer `json:"aad,omitempty"`
|
||||
EncryptedKey *byteBuffer `json:"encrypted_key,omitempty"`
|
||||
Iv *byteBuffer `json:"iv,omitempty"`
|
||||
Ciphertext *byteBuffer `json:"ciphertext,omitempty"`
|
||||
Tag *byteBuffer `json:"tag,omitempty"`
|
||||
}
|
||||
|
||||
// rawRecipientInfo represents a raw JWE Per-Recipient header JSON object. Used for parsing/serializing.
|
||||
type rawRecipientInfo struct {
|
||||
Header *rawHeader `json:"header,omitempty"`
|
||||
EncryptedKey string `json:"encrypted_key,omitempty"`
|
||||
}
|
||||
|
||||
// JSONWebEncryption represents an encrypted JWE object after parsing.
|
||||
type JSONWebEncryption struct {
|
||||
Header Header
|
||||
protected, unprotected *rawHeader
|
||||
recipients []recipientInfo
|
||||
aad, iv, ciphertext, tag []byte
|
||||
original *rawJSONWebEncryption
|
||||
}
|
||||
|
||||
// recipientInfo represents a raw JWE Per-Recipient header JSON object after parsing.
|
||||
type recipientInfo struct {
|
||||
header *rawHeader
|
||||
encryptedKey []byte
|
||||
}
|
||||
|
||||
// GetAuthData retrieves the (optional) authenticated data attached to the object.
|
||||
func (obj JSONWebEncryption) GetAuthData() []byte {
|
||||
if obj.aad != nil {
|
||||
out := make([]byte, len(obj.aad))
|
||||
copy(out, obj.aad)
|
||||
return out
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the merged header values
|
||||
func (obj JSONWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader {
|
||||
out := rawHeader{}
|
||||
out.merge(obj.protected)
|
||||
out.merge(obj.unprotected)
|
||||
|
||||
if recipient != nil {
|
||||
out.merge(recipient.header)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// Get the additional authenticated data from a JWE object.
|
||||
func (obj JSONWebEncryption) computeAuthData() []byte {
|
||||
var protected string
|
||||
|
||||
if obj.original != nil {
|
||||
protected = obj.original.Protected.base64()
|
||||
} else {
|
||||
protected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON((obj.protected)))
|
||||
}
|
||||
|
||||
output := []byte(protected)
|
||||
if obj.aad != nil {
|
||||
output = append(output, '.')
|
||||
output = append(output, []byte(base64.RawURLEncoding.EncodeToString(obj.aad))...)
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
// ParseEncrypted parses an encrypted message in compact or full serialization format.
|
||||
func ParseEncrypted(input string) (*JSONWebEncryption, error) {
|
||||
input = stripWhitespace(input)
|
||||
if strings.HasPrefix(input, "{") {
|
||||
return parseEncryptedFull(input)
|
||||
}
|
||||
|
||||
return parseEncryptedCompact(input)
|
||||
}
|
||||
|
||||
// parseEncryptedFull parses a message in compact format.
|
||||
func parseEncryptedFull(input string) (*JSONWebEncryption, error) {
|
||||
var parsed rawJSONWebEncryption
|
||||
err := json.Unmarshal([]byte(input), &parsed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parsed.sanitized()
|
||||
}
|
||||
|
||||
// sanitized produces a cleaned-up JWE object from the raw JSON.
|
||||
func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) {
|
||||
obj := &JSONWebEncryption{
|
||||
original: parsed,
|
||||
unprotected: parsed.Unprotected,
|
||||
}
|
||||
|
||||
// Check that there is not a nonce in the unprotected headers
|
||||
if parsed.Unprotected != nil {
|
||||
if nonce := parsed.Unprotected.getNonce(); nonce != "" {
|
||||
return nil, ErrUnprotectedNonce
|
||||
}
|
||||
}
|
||||
if parsed.Header != nil {
|
||||
if nonce := parsed.Header.getNonce(); nonce != "" {
|
||||
return nil, ErrUnprotectedNonce
|
||||
}
|
||||
}
|
||||
|
||||
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
|
||||
err := json.Unmarshal(parsed.Protected.bytes(), &obj.protected)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64())
|
||||
}
|
||||
}
|
||||
|
||||
// Note: this must be called _after_ we parse the protected header,
|
||||
// otherwise fields from the protected header will not get picked up.
|
||||
var err error
|
||||
mergedHeaders := obj.mergedHeaders(nil)
|
||||
obj.Header, err = mergedHeaders.sanitized()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("square/go-jose: cannot sanitize merged headers: %v (%v)", err, mergedHeaders)
|
||||
}
|
||||
|
||||
if len(parsed.Recipients) == 0 {
|
||||
obj.recipients = []recipientInfo{
|
||||
{
|
||||
header: parsed.Header,
|
||||
encryptedKey: parsed.EncryptedKey.bytes(),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
obj.recipients = make([]recipientInfo, len(parsed.Recipients))
|
||||
for r := range parsed.Recipients {
|
||||
encryptedKey, err := base64.RawURLEncoding.DecodeString(parsed.Recipients[r].EncryptedKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check that there is not a nonce in the unprotected header
|
||||
if parsed.Recipients[r].Header != nil && parsed.Recipients[r].Header.getNonce() != "" {
|
||||
return nil, ErrUnprotectedNonce
|
||||
}
|
||||
|
||||
obj.recipients[r].header = parsed.Recipients[r].Header
|
||||
obj.recipients[r].encryptedKey = encryptedKey
|
||||
}
|
||||
}
|
||||
|
||||
for _, recipient := range obj.recipients {
|
||||
headers := obj.mergedHeaders(&recipient)
|
||||
if headers.getAlgorithm() == "" || headers.getEncryption() == "" {
|
||||
return nil, fmt.Errorf("square/go-jose: message is missing alg/enc headers")
|
||||
}
|
||||
}
|
||||
|
||||
obj.iv = parsed.Iv.bytes()
|
||||
obj.ciphertext = parsed.Ciphertext.bytes()
|
||||
obj.tag = parsed.Tag.bytes()
|
||||
obj.aad = parsed.Aad.bytes()
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// parseEncryptedCompact parses a message in compact format.
|
||||
func parseEncryptedCompact(input string) (*JSONWebEncryption, error) {
|
||||
parts := strings.Split(input, ".")
|
||||
if len(parts) != 5 {
|
||||
return nil, fmt.Errorf("square/go-jose: compact JWE format must have five parts")
|
||||
}
|
||||
|
||||
rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
encryptedKey, err := base64.RawURLEncoding.DecodeString(parts[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
iv, err := base64.RawURLEncoding.DecodeString(parts[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ciphertext, err := base64.RawURLEncoding.DecodeString(parts[3])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tag, err := base64.RawURLEncoding.DecodeString(parts[4])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
raw := &rawJSONWebEncryption{
|
||||
Protected: newBuffer(rawProtected),
|
||||
EncryptedKey: newBuffer(encryptedKey),
|
||||
Iv: newBuffer(iv),
|
||||
Ciphertext: newBuffer(ciphertext),
|
||||
Tag: newBuffer(tag),
|
||||
}
|
||||
|
||||
return raw.sanitized()
|
||||
}
|
||||
|
||||
// CompactSerialize serializes an object using the compact serialization format.
|
||||
func (obj JSONWebEncryption) CompactSerialize() (string, error) {
|
||||
if len(obj.recipients) != 1 || obj.unprotected != nil ||
|
||||
obj.protected == nil || obj.recipients[0].header != nil {
|
||||
return "", ErrNotSupported
|
||||
}
|
||||
|
||||
serializedProtected := mustSerializeJSON(obj.protected)
|
||||
|
||||
return fmt.Sprintf(
|
||||
"%s.%s.%s.%s.%s",
|
||||
base64.RawURLEncoding.EncodeToString(serializedProtected),
|
||||
base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey),
|
||||
base64.RawURLEncoding.EncodeToString(obj.iv),
|
||||
base64.RawURLEncoding.EncodeToString(obj.ciphertext),
|
||||
base64.RawURLEncoding.EncodeToString(obj.tag)), nil
|
||||
}
|
||||
|
||||
// FullSerialize serializes an object using the full JSON serialization format.
|
||||
func (obj JSONWebEncryption) FullSerialize() string {
|
||||
raw := rawJSONWebEncryption{
|
||||
Unprotected: obj.unprotected,
|
||||
Iv: newBuffer(obj.iv),
|
||||
Ciphertext: newBuffer(obj.ciphertext),
|
||||
EncryptedKey: newBuffer(obj.recipients[0].encryptedKey),
|
||||
Tag: newBuffer(obj.tag),
|
||||
Aad: newBuffer(obj.aad),
|
||||
Recipients: []rawRecipientInfo{},
|
||||
}
|
||||
|
||||
if len(obj.recipients) > 1 {
|
||||
for _, recipient := range obj.recipients {
|
||||
info := rawRecipientInfo{
|
||||
Header: recipient.header,
|
||||
EncryptedKey: base64.RawURLEncoding.EncodeToString(recipient.encryptedKey),
|
||||
}
|
||||
raw.Recipients = append(raw.Recipients, info)
|
||||
}
|
||||
} else {
|
||||
// Use flattened serialization
|
||||
raw.Header = obj.recipients[0].header
|
||||
raw.EncryptedKey = newBuffer(obj.recipients[0].encryptedKey)
|
||||
}
|
||||
|
||||
if obj.protected != nil {
|
||||
raw.Protected = newBuffer(mustSerializeJSON(obj.protected))
|
||||
}
|
||||
|
||||
return string(mustSerializeJSON(raw))
|
||||
}
|
556
vendor/gopkg.in/square/go-jose.v2/jwk.go
generated
vendored
Normal file
@@ -0,0 +1,556 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
|
||||
"gopkg.in/square/go-jose.v2/json"
|
||||
)
|
||||
|
||||
// rawJSONWebKey represents a public or private key in JWK format, used for parsing/serializing.
|
||||
type rawJSONWebKey struct {
|
||||
Use string `json:"use,omitempty"`
|
||||
Kty string `json:"kty,omitempty"`
|
||||
Kid string `json:"kid,omitempty"`
|
||||
Crv string `json:"crv,omitempty"`
|
||||
Alg string `json:"alg,omitempty"`
|
||||
K *byteBuffer `json:"k,omitempty"`
|
||||
X *byteBuffer `json:"x,omitempty"`
|
||||
Y *byteBuffer `json:"y,omitempty"`
|
||||
N *byteBuffer `json:"n,omitempty"`
|
||||
E *byteBuffer `json:"e,omitempty"`
|
||||
// -- Following fields are only used for private keys --
|
||||
// RSA uses D, P and Q, while ECDSA uses only D. Fields Dp, Dq, and Qi are
|
||||
// completely optional. Therefore for RSA/ECDSA, D != nil is a contract that
|
||||
// we have a private key whereas D == nil means we have only a public key.
|
||||
D *byteBuffer `json:"d,omitempty"`
|
||||
P *byteBuffer `json:"p,omitempty"`
|
||||
Q *byteBuffer `json:"q,omitempty"`
|
||||
Dp *byteBuffer `json:"dp,omitempty"`
|
||||
Dq *byteBuffer `json:"dq,omitempty"`
|
||||
Qi *byteBuffer `json:"qi,omitempty"`
|
||||
// Certificates
|
||||
X5c []string `json:"x5c,omitempty"`
|
||||
}
|
||||
|
||||
// JSONWebKey represents a public or private key in JWK format.
|
||||
type JSONWebKey struct {
|
||||
Key interface{}
|
||||
Certificates []*x509.Certificate
|
||||
KeyID string
|
||||
Algorithm string
|
||||
Use string
|
||||
}
|
||||
|
||||
// MarshalJSON serializes the given key to its JSON representation.
|
||||
func (k JSONWebKey) MarshalJSON() ([]byte, error) {
|
||||
var raw *rawJSONWebKey
|
||||
var err error
|
||||
|
||||
switch key := k.Key.(type) {
|
||||
case ed25519.PublicKey:
|
||||
raw = fromEdPublicKey(key)
|
||||
case *ecdsa.PublicKey:
|
||||
raw, err = fromEcPublicKey(key)
|
||||
case *rsa.PublicKey:
|
||||
raw = fromRsaPublicKey(key)
|
||||
case ed25519.PrivateKey:
|
||||
raw, err = fromEdPrivateKey(key)
|
||||
case *ecdsa.PrivateKey:
|
||||
raw, err = fromEcPrivateKey(key)
|
||||
case *rsa.PrivateKey:
|
||||
raw, err = fromRsaPrivateKey(key)
|
||||
case []byte:
|
||||
raw, err = fromSymmetricKey(key)
|
||||
default:
|
||||
return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
raw.Kid = k.KeyID
|
||||
raw.Alg = k.Algorithm
|
||||
raw.Use = k.Use
|
||||
|
||||
for _, cert := range k.Certificates {
|
||||
raw.X5c = append(raw.X5c, base64.StdEncoding.EncodeToString(cert.Raw))
|
||||
}
|
||||
|
||||
return json.Marshal(raw)
|
||||
}
|
||||
|
||||
// UnmarshalJSON reads a key from its JSON representation.
|
||||
func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
|
||||
var raw rawJSONWebKey
|
||||
err = json.Unmarshal(data, &raw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var key interface{}
|
||||
switch raw.Kty {
|
||||
case "EC":
|
||||
if raw.D != nil {
|
||||
key, err = raw.ecPrivateKey()
|
||||
} else {
|
||||
key, err = raw.ecPublicKey()
|
||||
}
|
||||
case "RSA":
|
||||
if raw.D != nil {
|
||||
key, err = raw.rsaPrivateKey()
|
||||
} else {
|
||||
key, err = raw.rsaPublicKey()
|
||||
}
|
||||
case "oct":
|
||||
key, err = raw.symmetricKey()
|
||||
case "OKP":
|
||||
if raw.Crv == "Ed25519" && raw.X != nil {
|
||||
if raw.D != nil {
|
||||
key, err = raw.edPrivateKey()
|
||||
} else {
|
||||
key, err = raw.edPublicKey()
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("square/go-jose: unknown curve %s'", raw.Crv)
|
||||
}
|
||||
default:
|
||||
err = fmt.Errorf("square/go-jose: unknown json web key type '%s'", raw.Kty)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
*k = JSONWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use}
|
||||
}
|
||||
|
||||
k.Certificates = make([]*x509.Certificate, len(raw.X5c))
|
||||
for i, cert := range raw.X5c {
|
||||
raw, err := base64.StdEncoding.DecodeString(cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
k.Certificates[i], err = x509.ParseCertificate(raw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
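A brief round-trip sketch (illustrative only, not part of the vendored source): MarshalJSON and UnmarshalJSON let a Go key travel as a standard JWK document. The ES256 key below is generated on the fly just for the demonstration.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/json"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Wrap the public half in a JWK and serialize it.
	jwk := jose.JSONWebKey{Key: &priv.PublicKey, KeyID: "demo-key", Algorithm: "ES256", Use: "sig"}
	data, err := json.Marshal(jwk)
	if err != nil {
		panic(err)
	}

	// Parse it back; Key comes out as *ecdsa.PublicKey again.
	var parsed jose.JSONWebKey
	if err := json.Unmarshal(data, &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed.KeyID, parsed.Valid())
}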
|
||||
|
||||
// JSONWebKeySet represents a JWK Set object.
|
||||
type JSONWebKeySet struct {
|
||||
Keys []JSONWebKey `json:"keys"`
|
||||
}
|
||||
|
||||
// Key convenience method returns keys by key ID. Specification states
|
||||
// that a JWK Set "SHOULD" use distinct key IDs, but allows for some
|
||||
// cases where they are not distinct. Hence the method returns a slice
|
||||
// of JSONWebKeys.
|
||||
func (s *JSONWebKeySet) Key(kid string) []JSONWebKey {
|
||||
var keys []JSONWebKey
|
||||
for _, key := range s.Keys {
|
||||
if key.KeyID == kid {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
}
|
||||
|
||||
return keys
|
||||
}
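For example (a sketch, assuming a JWKS document fetched from elsewhere), a verifier would typically select candidate keys by the kid announced in a token header:

package main

import (
	"encoding/json"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	// A minimal JWKS with a single symmetric key, purely for illustration.
	const jwks = `{"keys":[{"kty":"oct","kid":"k1","k":"c2VjcmV0"}]}`

	var set jose.JSONWebKeySet
	if err := json.Unmarshal([]byte(jwks), &set); err != nil {
		panic(err)
	}
	for _, k := range set.Key("k1") {
		fmt.Printf("found key %q of type %T\n", k.KeyID, k.Key)
	}
}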
|
||||
|
||||
const rsaThumbprintTemplate = `{"e":"%s","kty":"RSA","n":"%s"}`
|
||||
const ecThumbprintTemplate = `{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`
|
||||
const edThumbprintTemplate = `{"crv":"%s","kty":"OKP","x":"%s"}`
|
||||
|
||||
func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) {
|
||||
coordLength := curveSize(curve)
|
||||
crv, err := curveName(curve)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return fmt.Sprintf(ecThumbprintTemplate, crv,
|
||||
newFixedSizeBuffer(x.Bytes(), coordLength).base64(),
|
||||
newFixedSizeBuffer(y.Bytes(), coordLength).base64()), nil
|
||||
}
|
||||
|
||||
func rsaThumbprintInput(n *big.Int, e int) (string, error) {
|
||||
return fmt.Sprintf(rsaThumbprintTemplate,
|
||||
newBufferFromInt(uint64(e)).base64(),
|
||||
newBuffer(n.Bytes()).base64()), nil
|
||||
}
|
||||
|
||||
func edThumbprintInput(ed ed25519.PublicKey) (string, error) {
|
||||
crv := "Ed25519"
|
||||
return fmt.Sprintf(edThumbprintTemplate, crv,
|
||||
newFixedSizeBuffer(ed, 32).base64()), nil
|
||||
}
|
||||
|
||||
// Thumbprint computes the JWK Thumbprint of a key using the
|
||||
// indicated hash algorithm.
|
||||
func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
|
||||
var input string
|
||||
var err error
|
||||
switch key := k.Key.(type) {
|
||||
case ed25519.PublicKey:
|
||||
input, err = edThumbprintInput(key)
|
||||
case *ecdsa.PublicKey:
|
||||
input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
|
||||
case *ecdsa.PrivateKey:
|
||||
input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
|
||||
case *rsa.PublicKey:
|
||||
input, err = rsaThumbprintInput(key.N, key.E)
|
||||
case *rsa.PrivateKey:
|
||||
input, err = rsaThumbprintInput(key.N, key.E)
|
||||
case ed25519.PrivateKey:
|
||||
input, err = edThumbprintInput(ed25519.PublicKey(key[0:32]))
|
||||
default:
|
||||
return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
h := hash.New()
|
||||
h.Write([]byte(input))
|
||||
return h.Sum(nil), nil
|
||||
}
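A small sketch of computing an RFC 7638 thumbprint (illustrative, not part of the file). Note that crypto.SHA256 must be registered by importing crypto/sha256 for its side effect, otherwise hash.New() panics.

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	_ "crypto/sha256" // registers crypto.SHA256
	"encoding/base64"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	jwk := jose.JSONWebKey{Key: &priv.PublicKey}

	tp, err := jwk.Thumbprint(crypto.SHA256)
	if err != nil {
		panic(err)
	}
	// JWK thumbprints are conventionally shown base64url-encoded (e.g. as "kid" values).
	fmt.Println(base64.RawURLEncoding.EncodeToString(tp))
}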
|
||||
|
||||
// IsPublic returns true if the JWK represents a public key (not symmetric, not private).
|
||||
func (k *JSONWebKey) IsPublic() bool {
|
||||
switch k.Key.(type) {
|
||||
case *ecdsa.PublicKey, *rsa.PublicKey, ed25519.PublicKey:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Public creates a JSONWebKey with the corresponding public key if the JWK represents an asymmetric private key.
|
||||
func (k *JSONWebKey) Public() JSONWebKey {
|
||||
if k.IsPublic() {
|
||||
return *k
|
||||
}
|
||||
ret := *k
|
||||
switch key := k.Key.(type) {
|
||||
case *ecdsa.PrivateKey:
|
||||
ret.Key = key.Public()
|
||||
case *rsa.PrivateKey:
|
||||
ret.Key = key.Public()
|
||||
case ed25519.PrivateKey:
|
||||
ret.Key = key.Public()
|
||||
default:
|
||||
return JSONWebKey{} // returning invalid key
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Valid checks that the key contains the expected parameters.
|
||||
func (k *JSONWebKey) Valid() bool {
|
||||
if k.Key == nil {
|
||||
return false
|
||||
}
|
||||
switch key := k.Key.(type) {
|
||||
case *ecdsa.PublicKey:
|
||||
if key.Curve == nil || key.X == nil || key.Y == nil {
|
||||
return false
|
||||
}
|
||||
case *ecdsa.PrivateKey:
|
||||
if key.Curve == nil || key.X == nil || key.Y == nil || key.D == nil {
|
||||
return false
|
||||
}
|
||||
case *rsa.PublicKey:
|
||||
if key.N == nil || key.E == 0 {
|
||||
return false
|
||||
}
|
||||
case *rsa.PrivateKey:
|
||||
if key.N == nil || key.E == 0 || key.D == nil || len(key.Primes) < 2 {
|
||||
return false
|
||||
}
|
||||
case ed25519.PublicKey:
|
||||
if len(key) != 32 {
|
||||
return false
|
||||
}
|
||||
case ed25519.PrivateKey:
|
||||
if len(key) != 64 {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) rsaPublicKey() (*rsa.PublicKey, error) {
|
||||
if key.N == nil || key.E == nil {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid RSA key, missing n/e values")
|
||||
}
|
||||
|
||||
return &rsa.PublicKey{
|
||||
N: key.N.bigInt(),
|
||||
E: key.E.toInt(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func fromEdPublicKey(pub ed25519.PublicKey) *rawJSONWebKey {
|
||||
return &rawJSONWebKey{
|
||||
Kty: "OKP",
|
||||
Crv: "Ed25519",
|
||||
X: newBuffer(pub),
|
||||
}
|
||||
}
|
||||
|
||||
func fromRsaPublicKey(pub *rsa.PublicKey) *rawJSONWebKey {
|
||||
return &rawJSONWebKey{
|
||||
Kty: "RSA",
|
||||
N: newBuffer(pub.N.Bytes()),
|
||||
E: newBufferFromInt(uint64(pub.E)),
|
||||
}
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) ecPublicKey() (*ecdsa.PublicKey, error) {
|
||||
var curve elliptic.Curve
|
||||
switch key.Crv {
|
||||
case "P-256":
|
||||
curve = elliptic.P256()
|
||||
case "P-384":
|
||||
curve = elliptic.P384()
|
||||
case "P-521":
|
||||
curve = elliptic.P521()
|
||||
default:
|
||||
return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
|
||||
}
|
||||
|
||||
if key.X == nil || key.Y == nil {
|
||||
return nil, errors.New("square/go-jose: invalid EC key, missing x/y values")
|
||||
}
|
||||
|
||||
x := key.X.bigInt()
|
||||
y := key.Y.bigInt()
|
||||
|
||||
if !curve.IsOnCurve(x, y) {
|
||||
return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve")
|
||||
}
|
||||
|
||||
return &ecdsa.PublicKey{
|
||||
Curve: curve,
|
||||
X: x,
|
||||
Y: y,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) {
|
||||
if pub == nil || pub.X == nil || pub.Y == nil {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid EC key (nil, or X/Y missing)")
|
||||
}
|
||||
|
||||
name, err := curveName(pub.Curve)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
size := curveSize(pub.Curve)
|
||||
|
||||
xBytes := pub.X.Bytes()
|
||||
yBytes := pub.Y.Bytes()
|
||||
|
||||
if len(xBytes) > size || len(yBytes) > size {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid EC key (X/Y too large)")
|
||||
}
|
||||
|
||||
key := &rawJSONWebKey{
|
||||
Kty: "EC",
|
||||
Crv: name,
|
||||
X: newFixedSizeBuffer(xBytes, size),
|
||||
Y: newFixedSizeBuffer(yBytes, size),
|
||||
}
|
||||
|
||||
return key, nil
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) {
|
||||
var missing []string
|
||||
switch {
|
||||
case key.D == nil:
|
||||
missing = append(missing, "D")
|
||||
case key.X == nil:
|
||||
missing = append(missing, "X")
|
||||
}
|
||||
|
||||
if len(missing) > 0 {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid Ed25519 private key, missing %s value(s)", strings.Join(missing, ", "))
|
||||
}
|
||||
|
||||
privateKey := make([]byte, ed25519.PrivateKeySize)
|
||||
copy(privateKey[0:32], key.X.bytes())
|
||||
copy(privateKey[32:], key.D.bytes())
|
||||
rv := ed25519.PrivateKey(privateKey)
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) edPublicKey() (ed25519.PublicKey, error) {
|
||||
if key.X == nil {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid Ed key, missing x value")
|
||||
}
|
||||
publicKey := make([]byte, ed25519.PublicKeySize)
|
||||
copy(publicKey[0:32], key.X.bytes())
|
||||
rv := ed25519.PublicKey(publicKey)
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) {
|
||||
var missing []string
|
||||
switch {
|
||||
case key.N == nil:
|
||||
missing = append(missing, "N")
|
||||
case key.E == nil:
|
||||
missing = append(missing, "E")
|
||||
case key.D == nil:
|
||||
missing = append(missing, "D")
|
||||
case key.P == nil:
|
||||
missing = append(missing, "P")
|
||||
case key.Q == nil:
|
||||
missing = append(missing, "Q")
|
||||
}
|
||||
|
||||
if len(missing) > 0 {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", "))
|
||||
}
|
||||
|
||||
rv := &rsa.PrivateKey{
|
||||
PublicKey: rsa.PublicKey{
|
||||
N: key.N.bigInt(),
|
||||
E: key.E.toInt(),
|
||||
},
|
||||
D: key.D.bigInt(),
|
||||
Primes: []*big.Int{
|
||||
key.P.bigInt(),
|
||||
key.Q.bigInt(),
|
||||
},
|
||||
}
|
||||
|
||||
if key.Dp != nil {
|
||||
rv.Precomputed.Dp = key.Dp.bigInt()
|
||||
}
|
||||
if key.Dq != nil {
|
||||
rv.Precomputed.Dq = key.Dq.bigInt()
|
||||
}
|
||||
if key.Qi != nil {
|
||||
rv.Precomputed.Qinv = key.Qi.bigInt()
|
||||
}
|
||||
|
||||
err := rv.Validate()
|
||||
return rv, err
|
||||
}
|
||||
|
||||
func fromEdPrivateKey(ed ed25519.PrivateKey) (*rawJSONWebKey, error) {
|
||||
raw := fromEdPublicKey(ed25519.PublicKey(ed[0:32]))
|
||||
|
||||
raw.D = newBuffer(ed[32:])
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
func fromRsaPrivateKey(rsa *rsa.PrivateKey) (*rawJSONWebKey, error) {
|
||||
if len(rsa.Primes) != 2 {
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
|
||||
raw := fromRsaPublicKey(&rsa.PublicKey)
|
||||
|
||||
raw.D = newBuffer(rsa.D.Bytes())
|
||||
raw.P = newBuffer(rsa.Primes[0].Bytes())
|
||||
raw.Q = newBuffer(rsa.Primes[1].Bytes())
|
||||
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
|
||||
var curve elliptic.Curve
|
||||
switch key.Crv {
|
||||
case "P-256":
|
||||
curve = elliptic.P256()
|
||||
case "P-384":
|
||||
curve = elliptic.P384()
|
||||
case "P-521":
|
||||
curve = elliptic.P521()
|
||||
default:
|
||||
return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
|
||||
}
|
||||
|
||||
if key.X == nil || key.Y == nil || key.D == nil {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid EC private key, missing x/y/d values")
|
||||
}
|
||||
|
||||
x := key.X.bigInt()
|
||||
y := key.Y.bigInt()
|
||||
|
||||
if !curve.IsOnCurve(x, y) {
|
||||
return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve")
|
||||
}
|
||||
|
||||
return &ecdsa.PrivateKey{
|
||||
PublicKey: ecdsa.PublicKey{
|
||||
Curve: curve,
|
||||
X: x,
|
||||
Y: y,
|
||||
},
|
||||
D: key.D.bigInt(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJSONWebKey, error) {
|
||||
raw, err := fromEcPublicKey(&ec.PublicKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ec.D == nil {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid EC private key")
|
||||
}
|
||||
|
||||
raw.D = newBuffer(ec.D.Bytes())
|
||||
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
func fromSymmetricKey(key []byte) (*rawJSONWebKey, error) {
|
||||
return &rawJSONWebKey{
|
||||
Kty: "oct",
|
||||
K: newBuffer(key),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) symmetricKey() ([]byte, error) {
|
||||
if key.K == nil {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid OCT (symmetric) key, missing k value")
|
||||
}
|
||||
return key.K.bytes(), nil
|
||||
}
|
321
vendor/gopkg.in/square/go-jose.v2/jws.go
generated
vendored
Normal file
@@ -0,0 +1,321 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/square/go-jose.v2/json"
|
||||
)
|
||||
|
||||
// rawJSONWebSignature represents a raw JWS JSON object. Used for parsing/serializing.
|
||||
type rawJSONWebSignature struct {
|
||||
Payload *byteBuffer `json:"payload,omitempty"`
|
||||
Signatures []rawSignatureInfo `json:"signatures,omitempty"`
|
||||
Protected *byteBuffer `json:"protected,omitempty"`
|
||||
Header *rawHeader `json:"header,omitempty"`
|
||||
Signature *byteBuffer `json:"signature,omitempty"`
|
||||
}
|
||||
|
||||
// rawSignatureInfo represents a single JWS signature over the JWS payload and protected header.
|
||||
type rawSignatureInfo struct {
|
||||
Protected *byteBuffer `json:"protected,omitempty"`
|
||||
Header *rawHeader `json:"header,omitempty"`
|
||||
Signature *byteBuffer `json:"signature,omitempty"`
|
||||
}
|
||||
|
||||
// JSONWebSignature represents a signed JWS object after parsing.
|
||||
type JSONWebSignature struct {
|
||||
payload []byte
|
||||
// Signatures attached to this object (may be more than one for multi-sig).
|
||||
// Be careful about accessing these directly, prefer to use Verify() or
|
||||
// VerifyMulti() to ensure that the data you're getting is verified.
|
||||
Signatures []Signature
|
||||
}
|
||||
|
||||
// Signature represents a single signature over the JWS payload and protected header.
|
||||
type Signature struct {
|
||||
// Merged header fields. Contains both protected and unprotected header
|
||||
// values. Prefer using Protected and Unprotected fields instead of this.
|
||||
// Values in this header may or may not have been signed and in general
|
||||
// should not be trusted.
|
||||
Header Header
|
||||
|
||||
// Protected header. Values in this header were signed and
|
||||
// will be verified as part of the signature verification process.
|
||||
Protected Header
|
||||
|
||||
// Unprotected header. Values in this header were not signed
|
||||
// and in general should not be trusted.
|
||||
Unprotected Header
|
||||
|
||||
// The actual signature value
|
||||
Signature []byte
|
||||
|
||||
protected *rawHeader
|
||||
header *rawHeader
|
||||
original *rawSignatureInfo
|
||||
}
|
||||
|
||||
// ParseSigned parses a signed message in compact or full serialization format.
|
||||
func ParseSigned(input string) (*JSONWebSignature, error) {
|
||||
input = stripWhitespace(input)
|
||||
if strings.HasPrefix(input, "{") {
|
||||
return parseSignedFull(input)
|
||||
}
|
||||
|
||||
return parseSignedCompact(input)
|
||||
}
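A parsing sketch (illustrative; in practice the token would come from an ACME server or similar peer): parsing alone verifies nothing, but it exposes the per-signature headers, which is how a caller would pick the right verification key by kid before calling Verify.

package main

import (
	"encoding/base64"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	enc := base64.RawURLEncoding.EncodeToString
	// Hand-built, unsigned token: enough for ParseSigned, useless for Verify.
	token := enc([]byte(`{"alg":"ES256","kid":"acct-1"}`)) + "." + enc([]byte(`{}`)) + "."

	obj, err := jose.ParseSigned(token)
	if err != nil {
		panic(err)
	}
	hdr := obj.Signatures[0].Protected
	fmt.Println(hdr.Algorithm, hdr.KeyID) // ES256 acct-1
}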
|
||||
|
||||
// mergedHeaders returns the protected and unprotected headers merged into one rawHeader.
|
||||
func (sig Signature) mergedHeaders() rawHeader {
|
||||
out := rawHeader{}
|
||||
out.merge(sig.protected)
|
||||
out.merge(sig.header)
|
||||
return out
|
||||
}
|
||||
|
||||
// Compute data to be signed
|
||||
func (obj JSONWebSignature) computeAuthData(signature *Signature) []byte {
|
||||
var serializedProtected string
|
||||
|
||||
if signature.original != nil && signature.original.Protected != nil {
|
||||
serializedProtected = signature.original.Protected.base64()
|
||||
} else if signature.protected != nil {
|
||||
serializedProtected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON(signature.protected))
|
||||
} else {
|
||||
serializedProtected = ""
|
||||
}
|
||||
|
||||
return []byte(fmt.Sprintf("%s.%s",
|
||||
serializedProtected,
|
||||
base64.RawURLEncoding.EncodeToString(obj.payload)))
|
||||
}
|
||||
|
||||
// parseSignedFull parses a message in full format.
|
||||
func parseSignedFull(input string) (*JSONWebSignature, error) {
|
||||
var parsed rawJSONWebSignature
|
||||
err := json.Unmarshal([]byte(input), &parsed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parsed.sanitized()
|
||||
}
|
||||
|
||||
// sanitized produces a cleaned-up JWS object from the raw JSON.
|
||||
func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
|
||||
if parsed.Payload == nil {
|
||||
return nil, fmt.Errorf("square/go-jose: missing payload in JWS message")
|
||||
}
|
||||
|
||||
obj := &JSONWebSignature{
|
||||
payload: parsed.Payload.bytes(),
|
||||
Signatures: make([]Signature, len(parsed.Signatures)),
|
||||
}
|
||||
|
||||
if len(parsed.Signatures) == 0 {
|
||||
// No signatures array, must be flattened serialization
|
||||
signature := Signature{}
|
||||
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
|
||||
signature.protected = &rawHeader{}
|
||||
err := json.Unmarshal(parsed.Protected.bytes(), signature.protected)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Check that there is not a nonce in the unprotected header
|
||||
if parsed.Header != nil && parsed.Header.getNonce() != "" {
|
||||
return nil, ErrUnprotectedNonce
|
||||
}
|
||||
|
||||
signature.header = parsed.Header
|
||||
signature.Signature = parsed.Signature.bytes()
|
||||
// Make a fake "original" rawSignatureInfo to store the unprocessed
|
||||
// Protected header. This is necessary because the Protected header can
|
||||
// contain arbitrary fields not registered as part of the spec. See
|
||||
// https://tools.ietf.org/html/draft-ietf-jose-json-web-signature-41#section-4
|
||||
// If we unmarshal Protected into a rawHeader with its explicit list of fields,
|
||||
// we cannot marshal losslessly. So we have to keep around the original bytes.
|
||||
// This is used in computeAuthData, which will first attempt to use
|
||||
// the original bytes of a protected header, and fall back on marshaling the
|
||||
// header struct only if those bytes are not available.
|
||||
signature.original = &rawSignatureInfo{
|
||||
Protected: parsed.Protected,
|
||||
Header: parsed.Header,
|
||||
Signature: parsed.Signature,
|
||||
}
|
||||
|
||||
var err error
|
||||
signature.Header, err = signature.mergedHeaders().sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if signature.header != nil {
|
||||
signature.Unprotected, err = signature.header.sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if signature.protected != nil {
|
||||
signature.Protected, err = signature.protected.sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
|
||||
jwk := signature.Header.JSONWebKey
|
||||
if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
|
||||
return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key")
|
||||
}
|
||||
|
||||
obj.Signatures = append(obj.Signatures, signature)
|
||||
}
|
||||
|
||||
for i, sig := range parsed.Signatures {
|
||||
if sig.Protected != nil && len(sig.Protected.bytes()) > 0 {
|
||||
obj.Signatures[i].protected = &rawHeader{}
|
||||
err := json.Unmarshal(sig.Protected.bytes(), obj.Signatures[i].protected)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Check that there is not a nonce in the unprotected header
|
||||
if sig.Header != nil && sig.Header.getNonce() != "" {
|
||||
return nil, ErrUnprotectedNonce
|
||||
}
|
||||
|
||||
var err error
|
||||
obj.Signatures[i].Header, err = obj.Signatures[i].mergedHeaders().sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if obj.Signatures[i].header != nil {
|
||||
obj.Signatures[i].Unprotected, err = obj.Signatures[i].header.sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if obj.Signatures[i].protected != nil {
|
||||
obj.Signatures[i].Protected, err = obj.Signatures[i].protected.sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
obj.Signatures[i].Signature = sig.Signature.bytes()
|
||||
|
||||
// As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
|
||||
jwk := obj.Signatures[i].Header.JSONWebKey
|
||||
if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
|
||||
return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key")
|
||||
}
|
||||
|
||||
// Copy value of sig
|
||||
original := sig
|
||||
|
||||
obj.Signatures[i].header = sig.Header
|
||||
obj.Signatures[i].original = &original
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// parseSignedCompact parses a message in compact format.
|
||||
func parseSignedCompact(input string) (*JSONWebSignature, error) {
|
||||
parts := strings.Split(input, ".")
|
||||
if len(parts) != 3 {
|
||||
return nil, fmt.Errorf("square/go-jose: compact JWS format must have three parts")
|
||||
}
|
||||
|
||||
rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
signature, err := base64.RawURLEncoding.DecodeString(parts[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
raw := &rawJSONWebSignature{
|
||||
Payload: newBuffer(payload),
|
||||
Protected: newBuffer(rawProtected),
|
||||
Signature: newBuffer(signature),
|
||||
}
|
||||
return raw.sanitized()
|
||||
}
|
||||
|
||||
// CompactSerialize serializes an object using the compact serialization format.
|
||||
func (obj JSONWebSignature) CompactSerialize() (string, error) {
|
||||
if len(obj.Signatures) != 1 || obj.Signatures[0].header != nil || obj.Signatures[0].protected == nil {
|
||||
return "", ErrNotSupported
|
||||
}
|
||||
|
||||
serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
|
||||
|
||||
return fmt.Sprintf(
|
||||
"%s.%s.%s",
|
||||
base64.RawURLEncoding.EncodeToString(serializedProtected),
|
||||
base64.RawURLEncoding.EncodeToString(obj.payload),
|
||||
base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature)), nil
|
||||
}
|
||||
|
||||
// FullSerialize serializes an object using the full JSON serialization format.
|
||||
func (obj JSONWebSignature) FullSerialize() string {
|
||||
raw := rawJSONWebSignature{
|
||||
Payload: newBuffer(obj.payload),
|
||||
}
|
||||
|
||||
if len(obj.Signatures) == 1 {
|
||||
if obj.Signatures[0].protected != nil {
|
||||
serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
|
||||
raw.Protected = newBuffer(serializedProtected)
|
||||
}
|
||||
raw.Header = obj.Signatures[0].header
|
||||
raw.Signature = newBuffer(obj.Signatures[0].Signature)
|
||||
} else {
|
||||
raw.Signatures = make([]rawSignatureInfo, len(obj.Signatures))
|
||||
for i, signature := range obj.Signatures {
|
||||
raw.Signatures[i] = rawSignatureInfo{
|
||||
Header: signature.header,
|
||||
Signature: newBuffer(signature.Signature),
|
||||
}
|
||||
|
||||
if signature.protected != nil {
|
||||
raw.Signatures[i].Protected = newBuffer(mustSerializeJSON(signature.protected))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return string(mustSerializeJSON(raw))
|
||||
}
|
417
vendor/gopkg.in/square/go-jose.v2/shared.go
generated
vendored
Normal file
@@ -0,0 +1,417 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto/elliptic"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"gopkg.in/square/go-jose.v2/json"
|
||||
)
|
||||
|
||||
// KeyAlgorithm represents a key management algorithm.
|
||||
type KeyAlgorithm string
|
||||
|
||||
// SignatureAlgorithm represents a signature (or MAC) algorithm.
|
||||
type SignatureAlgorithm string
|
||||
|
||||
// ContentEncryption represents a content encryption algorithm.
|
||||
type ContentEncryption string
|
||||
|
||||
// CompressionAlgorithm represents an algorithm used for plaintext compression.
|
||||
type CompressionAlgorithm string
|
||||
|
||||
// ContentType represents type of the contained data.
|
||||
type ContentType string
|
||||
|
||||
var (
|
||||
// ErrCryptoFailure represents an error in cryptographic primitive. This
|
||||
// occurs when, for example, a message had an invalid authentication tag or
|
||||
// could not be decrypted.
|
||||
ErrCryptoFailure = errors.New("square/go-jose: error in cryptographic primitive")
|
||||
|
||||
// ErrUnsupportedAlgorithm indicates that a selected algorithm is not
|
||||
// supported. This occurs when trying to instantiate an encrypter for an
|
||||
// algorithm that is not yet implemented.
|
||||
ErrUnsupportedAlgorithm = errors.New("square/go-jose: unknown/unsupported algorithm")
|
||||
|
||||
// ErrUnsupportedKeyType indicates that the given key type/format is not
|
||||
// supported. This occurs when trying to instantiate an encrypter and passing
|
||||
// it a key of an unrecognized type or with unsupported parameters, such as
|
||||
// an RSA private key with more than two primes.
|
||||
ErrUnsupportedKeyType = errors.New("square/go-jose: unsupported key type/format")
|
||||
|
||||
// ErrNotSupported indicates that serialization of the object is not supported. This occurs when
|
||||
// trying to compact-serialize an object which can't be represented in
|
||||
// compact form.
|
||||
ErrNotSupported = errors.New("square/go-jose: compact serialization not supported for object")
|
||||
|
||||
// ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a
|
||||
// nonce header parameter was included in an unprotected header object.
|
||||
ErrUnprotectedNonce = errors.New("square/go-jose: Nonce parameter included in unprotected header")
|
||||
)
|
||||
|
||||
// Key management algorithms
|
||||
const (
|
||||
ED25519 = KeyAlgorithm("ED25519")
|
||||
RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5
|
||||
RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1
|
||||
RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256
|
||||
A128KW = KeyAlgorithm("A128KW") // AES key wrap (128)
|
||||
A192KW = KeyAlgorithm("A192KW") // AES key wrap (192)
|
||||
A256KW = KeyAlgorithm("A256KW") // AES key wrap (256)
|
||||
DIRECT = KeyAlgorithm("dir") // Direct encryption
|
||||
ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES
|
||||
ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128)
|
||||
ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192)
|
||||
ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256)
|
||||
A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128)
|
||||
A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192)
|
||||
A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256)
|
||||
PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128)
|
||||
PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192)
|
||||
PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256)
|
||||
)
|
||||
|
||||
// Signature algorithms
|
||||
const (
|
||||
EdDSA = SignatureAlgorithm("EdDSA")
|
||||
HS256 = SignatureAlgorithm("HS256") // HMAC using SHA-256
|
||||
HS384 = SignatureAlgorithm("HS384") // HMAC using SHA-384
|
||||
HS512 = SignatureAlgorithm("HS512") // HMAC using SHA-512
|
||||
RS256 = SignatureAlgorithm("RS256") // RSASSA-PKCS-v1.5 using SHA-256
|
||||
RS384 = SignatureAlgorithm("RS384") // RSASSA-PKCS-v1.5 using SHA-384
|
||||
RS512 = SignatureAlgorithm("RS512") // RSASSA-PKCS-v1.5 using SHA-512
|
||||
ES256 = SignatureAlgorithm("ES256") // ECDSA using P-256 and SHA-256
|
||||
ES384 = SignatureAlgorithm("ES384") // ECDSA using P-384 and SHA-384
|
||||
ES512 = SignatureAlgorithm("ES512") // ECDSA using P-521 and SHA-512
|
||||
PS256 = SignatureAlgorithm("PS256") // RSASSA-PSS using SHA256 and MGF1-SHA256
|
||||
PS384 = SignatureAlgorithm("PS384") // RSASSA-PSS using SHA384 and MGF1-SHA384
|
||||
PS512 = SignatureAlgorithm("PS512") // RSASSA-PSS using SHA512 and MGF1-SHA512
|
||||
)
|
||||
|
||||
// Content encryption algorithms
|
||||
const (
|
||||
A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128)
|
||||
A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192)
|
||||
A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256)
|
||||
A128GCM = ContentEncryption("A128GCM") // AES-GCM (128)
|
||||
A192GCM = ContentEncryption("A192GCM") // AES-GCM (192)
|
||||
A256GCM = ContentEncryption("A256GCM") // AES-GCM (256)
|
||||
)
|
||||
|
||||
// Compression algorithms
|
||||
const (
|
||||
NONE = CompressionAlgorithm("") // No compression
|
||||
DEFLATE = CompressionAlgorithm("DEF") // DEFLATE (RFC 1951)
|
||||
)
|
||||
|
||||
// A key in the protected header of a JWS object. Use of the Header...
|
||||
// constants is preferred to enhance type safety.
|
||||
type HeaderKey string
|
||||
|
||||
const (
|
||||
HeaderType HeaderKey = "typ" // string
|
||||
HeaderContentType = "cty" // string
|
||||
|
||||
// These are set by go-jose and shouldn't need to be set by consumers of the
|
||||
// library.
|
||||
headerAlgorithm = "alg" // string
|
||||
headerEncryption = "enc" // ContentEncryption
|
||||
headerCompression = "zip" // CompressionAlgorithm
|
||||
headerCritical = "crit" // []string
|
||||
|
||||
headerAPU = "apu" // *byteBuffer
|
||||
headerAPV = "apv" // *byteBuffer
|
||||
headerEPK = "epk" // *JSONWebKey
|
||||
headerIV = "iv" // *byteBuffer
|
||||
headerTag = "tag" // *byteBuffer
|
||||
|
||||
headerJWK = "jwk" // *JSONWebKey
|
||||
headerKeyID = "kid" // string
|
||||
headerNonce = "nonce" // string
|
||||
)
|
||||
|
||||
// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing).
|
||||
//
|
||||
// The decoding of the constituent items is deferred because we want to marshal
|
||||
// some members into particular structs rather than generic maps, but at the
|
||||
// same time we need to receive any extra fields unhandled by this library to
|
||||
// pass through to consuming code in case it wants to examine them.
|
||||
type rawHeader map[HeaderKey]*json.RawMessage
|
||||
|
||||
// Header represents the read-only JOSE header for JWE/JWS objects.
|
||||
type Header struct {
|
||||
KeyID string
|
||||
JSONWebKey *JSONWebKey
|
||||
Algorithm string
|
||||
Nonce string
|
||||
|
||||
// Any headers not recognised above get unmarshaled from JSON in a generic
|
||||
// manner and placed in this map.
|
||||
ExtraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
func (parsed rawHeader) set(k HeaderKey, v interface{}) error {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parsed[k] = makeRawMessage(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getString gets a string from the raw JSON, defaulting to "".
|
||||
func (parsed rawHeader) getString(k HeaderKey) string {
|
||||
v, ok := parsed[k]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
var s string
|
||||
err := json.Unmarshal(*v, &s)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// getByteBuffer gets a byte buffer from the raw JSON. Returns (nil, nil) if
|
||||
// not specified.
|
||||
func (parsed rawHeader) getByteBuffer(k HeaderKey) (*byteBuffer, error) {
|
||||
v := parsed[k]
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var bb *byteBuffer
|
||||
err := json.Unmarshal(*v, &bb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bb, nil
|
||||
}
|
||||
|
||||
// getAlgorithm extracts parsed "alg" from the raw JSON as a KeyAlgorithm.
|
||||
func (parsed rawHeader) getAlgorithm() KeyAlgorithm {
|
||||
return KeyAlgorithm(parsed.getString(headerAlgorithm))
|
||||
}
|
||||
|
||||
// getSignatureAlgorithm extracts parsed "alg" from the raw JSON as a SignatureAlgorithm.
|
||||
func (parsed rawHeader) getSignatureAlgorithm() SignatureAlgorithm {
|
||||
return SignatureAlgorithm(parsed.getString(headerAlgorithm))
|
||||
}
|
||||
|
||||
// getEncryption extracts parsed "enc" from the raw JSON.
|
||||
func (parsed rawHeader) getEncryption() ContentEncryption {
|
||||
return ContentEncryption(parsed.getString(headerEncryption))
|
||||
}
|
||||
|
||||
// getCompression extracts parsed "zip" from the raw JSON.
|
||||
func (parsed rawHeader) getCompression() CompressionAlgorithm {
|
||||
return CompressionAlgorithm(parsed.getString(headerCompression))
|
||||
}
|
||||
|
||||
func (parsed rawHeader) getNonce() string {
|
||||
return parsed.getString(headerNonce)
|
||||
}
|
||||
|
||||
// getEPK extracts parsed "epk" from the raw JSON.
|
||||
func (parsed rawHeader) getEPK() (*JSONWebKey, error) {
|
||||
v := parsed[headerEPK]
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var epk *JSONWebKey
|
||||
err := json.Unmarshal(*v, &epk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return epk, nil
|
||||
}
|
||||
|
||||
// getAPU extracts parsed "apu" from the raw JSON.
|
||||
func (parsed rawHeader) getAPU() (*byteBuffer, error) {
|
||||
return parsed.getByteBuffer(headerAPU)
|
||||
}
|
||||
|
||||
// getAPV extracts parsed "apv" from the raw JSON.
|
||||
func (parsed rawHeader) getAPV() (*byteBuffer, error) {
|
||||
return parsed.getByteBuffer(headerAPV)
|
||||
}
|
||||
|
||||
// getIV extracts parsed "iv" from the raw JSON.
|
||||
func (parsed rawHeader) getIV() (*byteBuffer, error) {
|
||||
return parsed.getByteBuffer(headerIV)
|
||||
}
|
||||
|
||||
// getTag extracts parsed "tag" from the raw JSON.
|
||||
func (parsed rawHeader) getTag() (*byteBuffer, error) {
|
||||
return parsed.getByteBuffer(headerTag)
|
||||
}
|
||||
|
||||
// getJWK extracts parsed "jwk" from the raw JSON.
|
||||
func (parsed rawHeader) getJWK() (*JSONWebKey, error) {
|
||||
v := parsed[headerJWK]
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var jwk *JSONWebKey
|
||||
err := json.Unmarshal(*v, &jwk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return jwk, nil
|
||||
}
|
||||
|
||||
// getCritical extracts parsed "crit" from the raw JSON. If omitted, it
|
||||
// returns an empty slice.
|
||||
func (parsed rawHeader) getCritical() ([]string, error) {
|
||||
v := parsed[headerCritical]
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var q []string
|
||||
err := json.Unmarshal(*v, &q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q, nil
|
||||
}
|
||||
|
||||
// sanitized produces a cleaned-up header object from the raw JSON.
|
||||
func (parsed rawHeader) sanitized() (h Header, err error) {
|
||||
for k, v := range parsed {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
switch k {
|
||||
case headerJWK:
|
||||
var jwk *JSONWebKey
|
||||
err = json.Unmarshal(*v, &jwk)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal JWK: %v: %#v", err, string(*v))
|
||||
return
|
||||
}
|
||||
h.JSONWebKey = jwk
|
||||
case headerKeyID:
|
||||
var s string
|
||||
err = json.Unmarshal(*v, &s)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal key ID: %v: %#v", err, string(*v))
|
||||
return
|
||||
}
|
||||
h.KeyID = s
|
||||
case headerAlgorithm:
|
||||
var s string
|
||||
err = json.Unmarshal(*v, &s)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal algorithm: %v: %#v", err, string(*v))
|
||||
return
|
||||
}
|
||||
h.Algorithm = s
|
||||
case headerNonce:
|
||||
var s string
|
||||
err = json.Unmarshal(*v, &s)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal nonce: %v: %#v", err, string(*v))
|
||||
return
|
||||
}
|
||||
h.Nonce = s
|
||||
default:
|
||||
if h.ExtraHeaders == nil {
|
||||
h.ExtraHeaders = map[HeaderKey]interface{}{}
|
||||
}
|
||||
var v2 interface{}
|
||||
err = json.Unmarshal(*v, &v2)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal value: %v: %#v", err, string(*v))
|
||||
return
|
||||
}
|
||||
h.ExtraHeaders[k] = v2
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (dst rawHeader) isSet(k HeaderKey) bool {
|
||||
dvr := dst[k]
|
||||
if dvr == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var dv interface{}
|
||||
err := json.Unmarshal(*dvr, &dv)
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if dvStr, ok := dv.(string); ok {
|
||||
return dvStr != ""
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Merge headers from src into dst, giving precedence to headers already present in dst.
|
||||
func (dst rawHeader) merge(src *rawHeader) {
|
||||
if src == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for k, v := range *src {
|
||||
if dst.isSet(k) {
|
||||
continue
|
||||
}
|
||||
|
||||
dst[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// Get JOSE name of curve
|
||||
func curveName(crv elliptic.Curve) (string, error) {
|
||||
switch crv {
|
||||
case elliptic.P256():
|
||||
return "P-256", nil
|
||||
case elliptic.P384():
|
||||
return "P-384", nil
|
||||
case elliptic.P521():
|
||||
return "P-521", nil
|
||||
default:
|
||||
return "", fmt.Errorf("square/go-jose: unsupported/unknown elliptic curve")
|
||||
}
|
||||
}
|
||||
|
||||
// Get size of curve in bytes
|
||||
func curveSize(crv elliptic.Curve) int {
|
||||
bits := crv.Params().BitSize
|
||||
|
||||
div := bits / 8
|
||||
mod := bits % 8
|
||||
|
||||
if mod == 0 {
|
||||
return div
|
||||
}
|
||||
|
||||
return div + 1
|
||||
}
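For instance, P-256 gives 256/8 = 32 bytes exactly, while P-521 gives 521/8 = 65 with a remainder of 1, so coordinates are padded to 66 bytes.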
|
||||
|
||||
func makeRawMessage(b []byte) *json.RawMessage {
|
||||
rm := json.RawMessage(b)
|
||||
return &rm
|
||||
}
|
343
vendor/gopkg.in/square/go-jose.v2/signing.go
generated
vendored
Normal file
@@ -0,0 +1,343 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
|
||||
"gopkg.in/square/go-jose.v2/json"
|
||||
)
|
||||
|
||||
// NonceSource represents a source of random nonces to go into JWS objects
|
||||
type NonceSource interface {
|
||||
Nonce() (string, error)
|
||||
}
|
||||
|
||||
// Signer represents a signer which takes a payload and produces a signed JWS object.
|
||||
type Signer interface {
|
||||
Sign(payload []byte) (*JSONWebSignature, error)
|
||||
Options() SignerOptions
|
||||
}
|
||||
|
||||
// SigningKey represents an algorithm/key used to sign a message.
|
||||
type SigningKey struct {
|
||||
Algorithm SignatureAlgorithm
|
||||
Key interface{}
|
||||
}
|
||||
|
||||
// SignerOptions represents options that can be set when creating signers.
|
||||
type SignerOptions struct {
|
||||
NonceSource NonceSource
|
||||
EmbedJWK bool
|
||||
|
||||
// Optional map of additional keys to be inserted into the protected header
|
||||
// of a JWS object. Some specifications which make use of JWS like to insert
|
||||
// additional values here. All values must be JSON-serializable.
|
||||
ExtraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
|
||||
// if necessary. It returns itself and so can be used in a fluent style.
|
||||
func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions {
|
||||
if so.ExtraHeaders == nil {
|
||||
so.ExtraHeaders = map[HeaderKey]interface{}{}
|
||||
}
|
||||
so.ExtraHeaders[k] = v
|
||||
return so
|
||||
}
|
||||
|
||||
// WithContentType adds a content type ("cty") header and returns the updated
|
||||
// SignerOptions.
|
||||
func (so *SignerOptions) WithContentType(contentType ContentType) *SignerOptions {
|
||||
return so.WithHeader(HeaderContentType, contentType)
|
||||
}
|
||||
|
||||
// WithType adds a type ("typ") header and returns the updated SignerOptions.
|
||||
func (so *SignerOptions) WithType(typ ContentType) *SignerOptions {
|
||||
return so.WithHeader(HeaderType, typ)
|
||||
}
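The NonceSource hook above is the piece ACME relies on: each signed request must carry a fresh server-issued nonce in its protected header. A minimal sketch (with a stub nonce source standing in for a real Replay-Nonce fetch; the URL below is a made-up example):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

// staticNonceSource is a stand-in for a client that fetches Replay-Nonce
// values from an ACME server.
type staticNonceSource string

func (s staticNonceSource) Nonce() (string, error) { return string(s), nil }

func main() {
	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	opts := (&jose.SignerOptions{NonceSource: staticNonceSource("demo-nonce")}).
		WithHeader("url", "https://example.com/acme/new-order")

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: priv}, opts)
	if err != nil {
		panic(err)
	}
	obj, _ := signer.Sign([]byte(`{"identifiers":[{"type":"dns","value":"example.com"}]}`))
	fmt.Println(obj.FullSerialize()[:40], "...")
}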
|
||||
|
||||
type payloadSigner interface {
|
||||
signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error)
|
||||
}
|
||||
|
||||
type payloadVerifier interface {
|
||||
verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
|
||||
}
|
||||
|
||||
type genericSigner struct {
|
||||
recipients []recipientSigInfo
|
||||
nonceSource NonceSource
|
||||
embedJWK bool
|
||||
extraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
type recipientSigInfo struct {
|
||||
sigAlg SignatureAlgorithm
|
||||
publicKey *JSONWebKey
|
||||
signer payloadSigner
|
||||
}
|
||||
|
||||
// NewSigner creates an appropriate signer based on the key type
|
||||
func NewSigner(sig SigningKey, opts *SignerOptions) (Signer, error) {
|
||||
return NewMultiSigner([]SigningKey{sig}, opts)
|
||||
}
|
||||
|
||||
// NewMultiSigner creates a signer for multiple recipients
|
||||
func NewMultiSigner(sigs []SigningKey, opts *SignerOptions) (Signer, error) {
|
||||
signer := &genericSigner{recipients: []recipientSigInfo{}}
|
||||
|
||||
if opts != nil {
|
||||
signer.nonceSource = opts.NonceSource
|
||||
signer.embedJWK = opts.EmbedJWK
|
||||
signer.extraHeaders = opts.ExtraHeaders
|
||||
}
|
||||
|
||||
for _, sig := range sigs {
|
||||
err := signer.addRecipient(sig.Algorithm, sig.Key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return signer, nil
|
||||
}
|
||||
|
||||
// newVerifier creates a verifier based on the key type
|
||||
func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
|
||||
switch verificationKey := verificationKey.(type) {
|
||||
case ed25519.PublicKey:
|
||||
return &edEncrypterVerifier{
|
||||
publicKey: verificationKey,
|
||||
}, nil
|
||||
case *rsa.PublicKey:
|
||||
return &rsaEncrypterVerifier{
|
||||
publicKey: verificationKey,
|
||||
}, nil
|
||||
case *ecdsa.PublicKey:
|
||||
return &ecEncrypterVerifier{
|
||||
publicKey: verificationKey,
|
||||
}, nil
|
||||
case []byte:
|
||||
return &symmetricMac{
|
||||
key: verificationKey,
|
||||
}, nil
|
||||
case JSONWebKey:
|
||||
return newVerifier(verificationKey.Key)
|
||||
case *JSONWebKey:
|
||||
return newVerifier(verificationKey.Key)
|
||||
default:
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
|
||||
recipient, err := makeJWSRecipient(alg, signingKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx.recipients = append(ctx.recipients, recipient)
|
||||
return nil
|
||||
}
|
||||
|
||||
func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipientSigInfo, error) {
|
||||
switch signingKey := signingKey.(type) {
|
||||
case ed25519.PrivateKey:
|
||||
return newEd25519Signer(alg, signingKey)
|
||||
case *rsa.PrivateKey:
|
||||
return newRSASigner(alg, signingKey)
|
||||
case *ecdsa.PrivateKey:
|
||||
return newECDSASigner(alg, signingKey)
|
||||
case []byte:
|
||||
return newSymmetricSigner(alg, signingKey)
|
||||
case JSONWebKey:
|
||||
return newJWKSigner(alg, signingKey)
|
||||
case *JSONWebKey:
|
||||
return newJWKSigner(alg, *signingKey)
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedKeyType
|
||||
}
|
||||
}
|
||||
|
||||
func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) {
|
||||
recipient, err := makeJWSRecipient(alg, signingKey.Key)
|
||||
if err != nil {
|
||||
return recipientSigInfo{}, err
|
||||
}
|
||||
if recipient.publicKey != nil {
|
||||
// recipient.publicKey is a JWK synthesized for embedding when recipientSigInfo
|
||||
// was created for the inner key (such as an RSA or ECDSA public key). It contains
|
||||
// the pub key for embedding, but doesn't have extra params like key id.
|
||||
publicKey := signingKey
|
||||
publicKey.Key = recipient.publicKey.Key
|
||||
recipient.publicKey = &publicKey
|
||||
|
||||
// This should be impossible, but let's check anyway.
|
||||
if !recipient.publicKey.IsPublic() {
|
||||
return recipientSigInfo{}, errors.New("square/go-jose: public key was unexpectedly not public")
|
||||
}
|
||||
}
|
||||
return recipient, nil
|
||||
}
|
||||
|
||||
func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) {
	obj := &JSONWebSignature{}
	obj.payload = payload
	obj.Signatures = make([]Signature, len(ctx.recipients))

	for i, recipient := range ctx.recipients {
		protected := map[HeaderKey]interface{}{
			headerAlgorithm: string(recipient.sigAlg),
		}

		if recipient.publicKey != nil {
			// We want to embed the JWK or set the kid header, but not both. Having a protected
			// header that contains an embedded JWK while also simultaneously containing the kid
			// header is confusing, and at least in ACME the two are considered to be mutually
			// exclusive. The fact that both can exist at the same time is a somewhat unfortunate
			// result of the JOSE spec. We've decided that this library will only include one or
			// the other to avoid this confusion.
			//
			// See https://github.com/square/go-jose/issues/157 for more context.
			if ctx.embedJWK {
				protected[headerJWK] = recipient.publicKey
			} else {
				protected[headerKeyID] = recipient.publicKey.KeyID
			}
		}

		if ctx.nonceSource != nil {
			nonce, err := ctx.nonceSource.Nonce()
			if err != nil {
				return nil, fmt.Errorf("square/go-jose: Error generating nonce: %v", err)
			}
			protected[headerNonce] = nonce
		}

		for k, v := range ctx.extraHeaders {
			protected[k] = v
		}

		serializedProtected := mustSerializeJSON(protected)

		input := []byte(fmt.Sprintf("%s.%s",
			base64.RawURLEncoding.EncodeToString(serializedProtected),
			base64.RawURLEncoding.EncodeToString(payload)))

		signatureInfo, err := recipient.signer.signPayload(input, recipient.sigAlg)
		if err != nil {
			return nil, err
		}

		signatureInfo.protected = &rawHeader{}
		for k, v := range protected {
			b, err := json.Marshal(v)
			if err != nil {
				return nil, fmt.Errorf("square/go-jose: Error marshalling item %#v: %v", k, err)
			}
			(*signatureInfo.protected)[k] = makeRawMessage(b)
		}
		obj.Signatures[i] = signatureInfo
	}

	return obj, nil
}

func (ctx *genericSigner) Options() SignerOptions {
	return SignerOptions{
		NonceSource:  ctx.nonceSource,
		EmbedJWK:     ctx.embedJWK,
		ExtraHeaders: ctx.extraHeaders,
	}
}

// Verify validates the signature on the object and returns the payload.
// This function does not support multi-signature; if you desire multi-signature
// verification, use VerifyMulti instead.
//
// Be careful when verifying signatures based on embedded JWKs inside the
// payload header. You cannot assume that the key received in a payload is
// trusted.
func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
	verifier, err := newVerifier(verificationKey)
	if err != nil {
		return nil, err
	}

	if len(obj.Signatures) > 1 {
		return nil, errors.New("square/go-jose: too many signatures in payload; expecting only one")
	}

	signature := obj.Signatures[0]
	headers := signature.mergedHeaders()
	critical, err := headers.getCritical()
	if err != nil {
		return nil, err
	}
	if len(critical) > 0 {
		// Unsupported crit header
		return nil, ErrCryptoFailure
	}

	input := obj.computeAuthData(&signature)
	alg := headers.getSignatureAlgorithm()
	err = verifier.verifyPayload(input, signature.Signature, alg)
	if err == nil {
		return obj.payload, nil
	}

	return nil, ErrCryptoFailure
}

// VerifyMulti validates (one of the multiple) signatures on the object and
// returns the index of the signature that was verified, along with the signature
// object and the payload. We return the signature and index to guarantee that
// callers are getting the verified value.
func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
	verifier, err := newVerifier(verificationKey)
	if err != nil {
		return -1, Signature{}, nil, err
	}

	for i, signature := range obj.Signatures {
		headers := signature.mergedHeaders()
		critical, err := headers.getCritical()
		if err != nil {
			continue
		}
		if len(critical) > 0 {
			// Unsupported crit header
			continue
		}

		input := obj.computeAuthData(&signature)
		alg := headers.getSignatureAlgorithm()
		err = verifier.verifyPayload(input, signature.Signature, alg)
		if err == nil {
			return i, signature, obj.payload, nil
		}
	}

	return -1, Signature{}, nil, ErrCryptoFailure
}

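The Sign and Verify paths above are what the ACME v2 integration relies on: Sign assembles a protected header (alg, an embedded JWK or a kid, plus an anti-replay nonce when a NonceSource is configured) and signs base64url(header).base64url(payload). The following is a minimal round-trip sketch against the public gopkg.in/square/go-jose.v2 API, not part of this diff; the fixedNonce type is illustrative only (a real ACME client draws nonces from the CA's newNonce endpoint).

// Sketch only: exercises the Sign/Verify paths above via the public API.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

// fixedNonce is a stand-in NonceSource for illustration.
type fixedNonce string

func (n fixedNonce) Nonce() (string, error) { return string(n), nil }

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// EmbedJWK and NonceSource end up in the protected header built by Sign.
	opts := &jose.SignerOptions{EmbedJWK: true, NonceSource: fixedNonce("example-nonce")}
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: key}, opts)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte(`{"hello":"acme"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(obj.FullSerialize())

	// Verification uses the trusted public key, never the embedded JWK.
	payload, err := obj.Verify(&key.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", payload)
}
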
360
vendor/gopkg.in/square/go-jose.v2/symmetric.go
generated
vendored
Normal file
@@ -0,0 +1,360 @@
/*-
 * Copyright 2014 Square Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package jose

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"crypto/sha512"
	"crypto/subtle"
	"errors"
	"fmt"
	"hash"
	"io"

	josecipher "gopkg.in/square/go-jose.v2/cipher"
)

// Random reader (stubbed out in tests)
var randReader = rand.Reader

// Dummy key cipher for shared symmetric key mode
type symmetricKeyCipher struct {
	key []byte // Pre-shared content-encryption key
}

// Signer/verifier for MAC modes
type symmetricMac struct {
	key []byte
}

// Input/output from an AEAD operation
type aeadParts struct {
	iv, ciphertext, tag []byte
}

// A content cipher based on an AEAD construction
type aeadContentCipher struct {
	keyBytes     int
	authtagBytes int
	getAead      func(key []byte) (cipher.AEAD, error)
}

// Random key generator
type randomKeyGenerator struct {
	size int
}

// Static key generator
type staticKeyGenerator struct {
	key []byte
}

// Create a new content cipher based on AES-GCM
func newAESGCM(keySize int) contentCipher {
	return &aeadContentCipher{
		keyBytes:     keySize,
		authtagBytes: 16,
		getAead: func(key []byte) (cipher.AEAD, error) {
			aes, err := aes.NewCipher(key)
			if err != nil {
				return nil, err
			}

			return cipher.NewGCM(aes)
		},
	}
}

// Create a new content cipher based on AES-CBC+HMAC
func newAESCBC(keySize int) contentCipher {
	return &aeadContentCipher{
		keyBytes:     keySize * 2,
		authtagBytes: 16,
		getAead: func(key []byte) (cipher.AEAD, error) {
			return josecipher.NewCBCHMAC(key, aes.NewCipher)
		},
	}
}

// Get an AEAD cipher object for the given content encryption algorithm
func getContentCipher(alg ContentEncryption) contentCipher {
	switch alg {
	case A128GCM:
		return newAESGCM(16)
	case A192GCM:
		return newAESGCM(24)
	case A256GCM:
		return newAESGCM(32)
	case A128CBC_HS256:
		return newAESCBC(16)
	case A192CBC_HS384:
		return newAESCBC(24)
	case A256CBC_HS512:
		return newAESCBC(32)
	default:
		return nil
	}
}

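getContentCipher above maps each JWE content-encryption algorithm onto an AEAD construction: AES-GCM directly, or AES-CBC with HMAC via josecipher.NewCBCHMAC. Below is a minimal sketch, not part of this diff, that drives the A256GCM cipher through the public API in direct (pre-shared key) mode.

// Sketch only: DIRECT key management + A256GCM content encryption.
package main

import (
	"crypto/rand"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	// A256GCM maps to newAESGCM(32) above, so the pre-shared key is 32 bytes.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	enc, err := jose.NewEncrypter(jose.A256GCM, jose.Recipient{Algorithm: jose.DIRECT, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("secret payload"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseEncrypted(compact)
	if err != nil {
		panic(err)
	}
	plaintext, err := parsed.Decrypt(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plaintext)
}
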
// newSymmetricRecipient creates a JWE encrypter based on AES-GCM key wrap.
func newSymmetricRecipient(keyAlg KeyAlgorithm, key []byte) (recipientKeyInfo, error) {
	switch keyAlg {
	case DIRECT, A128GCMKW, A192GCMKW, A256GCMKW, A128KW, A192KW, A256KW:
	default:
		return recipientKeyInfo{}, ErrUnsupportedAlgorithm
	}

	return recipientKeyInfo{
		keyAlg: keyAlg,
		keyEncrypter: &symmetricKeyCipher{
			key: key,
		},
	}, nil
}

// newSymmetricSigner creates a recipientSigInfo based on the given key.
func newSymmetricSigner(sigAlg SignatureAlgorithm, key []byte) (recipientSigInfo, error) {
	// Verify that the signature algorithm is supported by this signer
	switch sigAlg {
	case HS256, HS384, HS512:
	default:
		return recipientSigInfo{}, ErrUnsupportedAlgorithm
	}

	return recipientSigInfo{
		sigAlg: sigAlg,
		signer: &symmetricMac{
			key: key,
		},
	}, nil
}

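newSymmetricRecipient above accepts direct mode plus the AES key-wrap (A128/192/256KW) and AES-GCM key-wrap (A128/192/256GCMKW) algorithms; encryptKey and decryptKey further down do the actual wrapping. A short sketch of the GCM key-wrap branch via the public API, not part of this diff; a 32-byte key-encryption key selects the 256-bit variant because encryptKey sizes its AEAD from len(ctx.key).

// Sketch only: A256GCMKW key wrap; the key-wrap IV and tag travel in the JWE
// headers, as set by encryptKey below.
package main

import (
	"crypto/rand"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	// A 32-byte key-encryption key selects the A256GCMKW path.
	kek := make([]byte, 32)
	if _, err := rand.Read(kek); err != nil {
		panic(err)
	}

	enc, err := jose.NewEncrypter(jose.A128GCM, jose.Recipient{Algorithm: jose.A256GCMKW, Key: kek}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("wrapped key demo"))
	if err != nil {
		panic(err)
	}

	serialized := obj.FullSerialize() // JSON serialization exposes the iv/tag header fields
	parsed, err := jose.ParseEncrypted(serialized)
	if err != nil {
		panic(err)
	}
	out, err := parsed.Decrypt(kek)
	if err != nil {
		panic(err)
	}
	fmt.Println(serialized)
	fmt.Printf("%s\n", out)
}
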
// Generate a random key for the given content cipher
|
||||
func (ctx randomKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
||||
key := make([]byte, ctx.size)
|
||||
_, err := io.ReadFull(randReader, key)
|
||||
if err != nil {
|
||||
return nil, rawHeader{}, err
|
||||
}
|
||||
|
||||
return key, rawHeader{}, nil
|
||||
}
|
||||
|
||||
// Key size for random generator
|
||||
func (ctx randomKeyGenerator) keySize() int {
|
||||
return ctx.size
|
||||
}
|
||||
|
||||
// Generate a static key (for direct mode)
|
||||
func (ctx staticKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
||||
cek := make([]byte, len(ctx.key))
|
||||
copy(cek, ctx.key)
|
||||
return cek, rawHeader{}, nil
|
||||
}
|
||||
|
||||
// Key size for static generator
|
||||
func (ctx staticKeyGenerator) keySize() int {
|
||||
return len(ctx.key)
|
||||
}
|
||||
|
||||
// Get key size for this cipher
|
||||
func (ctx aeadContentCipher) keySize() int {
|
||||
return ctx.keyBytes
|
||||
}
|
||||
|
||||
// Encrypt some data
|
||||
func (ctx aeadContentCipher) encrypt(key, aad, pt []byte) (*aeadParts, error) {
|
||||
// Get a new AEAD instance
|
||||
aead, err := ctx.getAead(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize a new nonce
|
||||
iv := make([]byte, aead.NonceSize())
|
||||
_, err = io.ReadFull(randReader, iv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ciphertextAndTag := aead.Seal(nil, iv, pt, aad)
|
||||
offset := len(ciphertextAndTag) - ctx.authtagBytes
|
||||
|
||||
return &aeadParts{
|
||||
iv: iv,
|
||||
ciphertext: ciphertextAndTag[:offset],
|
||||
tag: ciphertextAndTag[offset:],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Decrypt some data
|
||||
func (ctx aeadContentCipher) decrypt(key, aad []byte, parts *aeadParts) ([]byte, error) {
|
||||
aead, err := ctx.getAead(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return aead.Open(nil, parts.iv, append(parts.ciphertext, parts.tag...), aad)
|
||||
}
|
||||
|
||||
// Encrypt the content encryption key.
func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
	switch alg {
	case DIRECT:
		return recipientInfo{
			header: &rawHeader{},
		}, nil
	case A128GCMKW, A192GCMKW, A256GCMKW:
		aead := newAESGCM(len(ctx.key))

		parts, err := aead.encrypt(ctx.key, []byte{}, cek)
		if err != nil {
			return recipientInfo{}, err
		}

		header := &rawHeader{}
		header.set(headerIV, newBuffer(parts.iv))
		header.set(headerTag, newBuffer(parts.tag))

		return recipientInfo{
			header:       header,
			encryptedKey: parts.ciphertext,
		}, nil
	case A128KW, A192KW, A256KW:
		block, err := aes.NewCipher(ctx.key)
		if err != nil {
			return recipientInfo{}, err
		}

		jek, err := josecipher.KeyWrap(block, cek)
		if err != nil {
			return recipientInfo{}, err
		}

		return recipientInfo{
			encryptedKey: jek,
			header:       &rawHeader{},
		}, nil
	}

	return recipientInfo{}, ErrUnsupportedAlgorithm
}

// Decrypt the content encryption key.
func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
	switch headers.getAlgorithm() {
	case DIRECT:
		cek := make([]byte, len(ctx.key))
		copy(cek, ctx.key)
		return cek, nil
	case A128GCMKW, A192GCMKW, A256GCMKW:
		aead := newAESGCM(len(ctx.key))

		iv, err := headers.getIV()
		if err != nil {
			return nil, fmt.Errorf("square/go-jose: invalid IV: %v", err)
		}
		tag, err := headers.getTag()
		if err != nil {
			return nil, fmt.Errorf("square/go-jose: invalid tag: %v", err)
		}

		parts := &aeadParts{
			iv:         iv.bytes(),
			ciphertext: recipient.encryptedKey,
			tag:        tag.bytes(),
		}

		cek, err := aead.decrypt(ctx.key, []byte{}, parts)
		if err != nil {
			return nil, err
		}

		return cek, nil
	case A128KW, A192KW, A256KW:
		block, err := aes.NewCipher(ctx.key)
		if err != nil {
			return nil, err
		}

		cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
		if err != nil {
			return nil, err
		}
		return cek, nil
	}

	return nil, ErrUnsupportedAlgorithm
}

// Sign the given payload
func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
	mac, err := ctx.hmac(payload, alg)
	if err != nil {
		return Signature{}, errors.New("square/go-jose: failed to compute hmac")
	}

	return Signature{
		Signature: mac,
		protected: &rawHeader{},
	}, nil
}

// Verify the given payload
func (ctx symmetricMac) verifyPayload(payload []byte, mac []byte, alg SignatureAlgorithm) error {
	expected, err := ctx.hmac(payload, alg)
	if err != nil {
		return errors.New("square/go-jose: failed to compute hmac")
	}

	if len(mac) != len(expected) {
		return errors.New("square/go-jose: invalid hmac")
	}

	match := subtle.ConstantTimeCompare(mac, expected)
	if match != 1 {
		return errors.New("square/go-jose: invalid hmac")
	}

	return nil
}

// Compute the HMAC based on the given alg value
func (ctx symmetricMac) hmac(payload []byte, alg SignatureAlgorithm) ([]byte, error) {
	var hash func() hash.Hash

	switch alg {
	case HS256:
		hash = sha256.New
	case HS384:
		hash = sha512.New384
	case HS512:
		hash = sha512.New
	default:
		return nil, ErrUnsupportedAlgorithm
	}

	hmac := hmac.New(hash, ctx.key)

	// According to documentation, Write() on hash never fails
	_, _ = hmac.Write(payload)
	return hmac.Sum(nil), nil
}

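newSymmetricSigner and symmetricMac above provide the HS256/HS384/HS512 path, with a constant-time MAC comparison in verifyPayload. A minimal sketch via the public API, not part of this diff, assuming a 32-byte shared secret:

// Sketch only: HS256 signing and verification over a shared secret.
package main

import (
	"crypto/rand"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	secret := make([]byte, 32)
	if _, err := rand.Read(secret); err != nil {
		panic(err)
	}

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: secret}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte("hmac me"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// Verification needs the same secret; symmetricMac compares MACs in constant time.
	parsed, err := jose.ParseSigned(compact)
	if err != nil {
		panic(err)
	}
	out, err := parsed.Verify(secret)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)
}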