diff --git a/glide.lock b/glide.lock index f271caef2..2be78ea15 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 90d53da9a6eaba85d524d95410051ff7f12b1f76181df2ad4796b2439f3a2a37 -updated: 2017-10-24T14:08:11.364720581+02:00 +hash: 70e3ed39d2777d36b679ff55b8b0f2eb8d0b239fdfb8108f41b8b7239bc7920c +updated: 2017-10-25T10:18:20.438135-04:00 imports: - name: cloud.google.com/go version: 2e6a95edb1071d750f6d7db777bf66cd2997af6c @@ -56,13 +56,14 @@ imports: - service/route53 - service/sts - name: github.com/Azure/azure-sdk-for-go - version: 088007b3b08cc02b27f2eadfdcd870958460ce7e + version: f7bb4db3ea4c73dc58bd284c38ea644a79324be0 subpackages: - arm/dns - name: github.com/Azure/go-autorest - version: a2fdd780c9a50455cecd249b00bdc3eb73a78e31 + version: f6be1abbb5abd0517522f850dd785990d373da7e subpackages: - autorest + - autorest/adal - autorest/azure - autorest/date - autorest/to @@ -121,11 +122,11 @@ imports: subpackages: - spew - name: github.com/decker502/dnspod-go - version: 68650ee11e182e30773781d391c66a0c80ccf9f2 + version: f33a2c6040fc2550a631de7b3a53bddccdcd73fb - name: github.com/dgrijalva/jwt-go version: d2709f9f1f31ebcda9651b03077758c1f3a0018c - name: github.com/dnsimple/dnsimple-go - version: 5a5b427618a76f9eed5ede0f3e6306fbd9311d2e + version: f2d9b723cc9547d182e24ac2e527ae25d25fc93f subpackages: - dnsimple - name: github.com/docker/distribution @@ -219,7 +220,7 @@ imports: - name: github.com/eapache/queue version: 44cc805cf13205b55f69e14bcb69867d1ae92f98 - name: github.com/edeckers/auroradnsclient - version: 8b777c170cfd377aa16bb4368f093017dddef3f9 + version: 398f53855ba258191157e20fabfaccca5e13cea9 subpackages: - records - requests @@ -233,6 +234,8 @@ imports: subpackages: - log - swagger +- name: github.com/exoscale/egoscale + version: 325740036187ddae3a5b74be00fbbc70011c4d96 - name: github.com/fatih/color version: 62e9147c64a1ed519147b62a56a14e83e2be02c1 - name: github.com/gambol99/go-marathon @@ -240,7 +243,7 @@ imports: - name: github.com/ghodss/yaml version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee - name: github.com/go-ini/ini - version: e7fea39b01aea8d5671f6858f0532f56e8bff3a5 + version: f384f410798cbe7cdce40eec40b79ed32bb4f1ad - name: github.com/go-kit/kit version: f66b0e13579bfc5a48b9e2a94b1209c107ea1f41 subpackages: @@ -309,7 +312,7 @@ imports: - name: github.com/imdario/mergo version: 3e95a51e0639b4cf372f2ccf74c86749d747fbdc - name: github.com/JamesClonk/vultr - version: 0f156dd232bc4ebf8a32ba83fec57c0e4c9db69f + version: 2fd0705ce648e602e6c9c57329a174270a4f6688 subpackages: - lib - name: github.com/jmespath/go-jmespath @@ -398,7 +401,7 @@ imports: - specs-go - specs-go/v1 - name: github.com/ovh/go-ovh - version: d2207178e10e4527e8f222fd8707982df8c3af17 + version: 4b1fea467323b74c5f462f0947f402b428ca0626 subpackages: - ovh - name: github.com/pborman/uuid @@ -432,10 +435,6 @@ imports: version: 8a290539e2e8629dbc4e6bad948158f790ec31f4 - name: github.com/PuerkitoBio/urlesc version: 5bd2802263f21d8788851d5305584c82a5c75d7e -- name: github.com/pyr/egoscale - version: 987e683a7552f34ee586217d1cc8507d52e80ab9 - subpackages: - - src/egoscale - name: github.com/rancher/go-rancher version: 5b8f6cc26b355ba03d7611fce3844155b7baf05b subpackages: @@ -509,7 +508,7 @@ imports: - plugin/rewrite - router - name: github.com/xenolf/lego - version: 5dfe609afb1ebe9da97c9846d97a55415e5a5ccd + version: 67c86d860a797ce2483f50d9174d4ed24984bef2 subpackages: - acme - providers/dns @@ -527,6 +526,7 @@ imports: - providers/dns/linode - providers/dns/namecheap - providers/dns/ns1 + - 
providers/dns/otc - providers/dns/ovh - providers/dns/pdns - providers/dns/rackspace @@ -585,7 +585,7 @@ imports: subpackages: - rate - name: google.golang.org/api - version: 9bf6e6e569ff057f75d9604a46c52928f17d2b54 + version: 1575df15c1bb8b18ad4d9bc5ca495cc85b0764fe subpackages: - dns/v1 - gensupport @@ -622,9 +622,9 @@ imports: - name: gopkg.in/inf.v0 version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 - name: gopkg.in/ini.v1 - version: e7fea39b01aea8d5671f6858f0532f56e8bff3a5 + version: 5b3e00af70a9484542169a976dcab8d03e601a17 - name: gopkg.in/ns1/ns1-go.v2 - version: 2abc76c60bf88ba33b15d1d87a13f624d8dff956 + version: c563826f4cbef9c11bebeb9f20a3f7afe9c1e2f4 subpackages: - rest - rest/model/account @@ -751,7 +751,7 @@ imports: - transport testImports: - name: github.com/Azure/go-ansiterm - version: 19f72df4d05d31cbe1c56bfc8045c96babff6c7e + version: d6e3b3328b783f23731bc4d058875b0371ff8109 subpackages: - winterm - name: github.com/docker/cli diff --git a/glide.yaml b/glide.yaml index 07e8e2b95..72c622c80 100644 --- a/glide.yaml +++ b/glide.yaml @@ -59,7 +59,7 @@ import: - package: github.com/vulcand/predicate version: 19b9dde14240d94c804ae5736ad0e1de10bf8fe6 - package: github.com/xenolf/lego - version: 5dfe609afb1ebe9da97c9846d97a55415e5a5ccd + version: 67c86d860a797ce2483f50d9174d4ed24984bef2 subpackages: - acme - package: gopkg.in/fsnotify.v1 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE new file mode 100644 index 000000000..2d1d72608 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE @@ -0,0 +1,5 @@ +Microsoft Azure-SDK-for-Go +Copyright 2014-2017 Microsoft + +This product includes software developed at +the Microsoft Corporation (https://www.microsoft.com). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/client.go index 05ef36995..714b698b5 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/client.go @@ -17,9 +17,8 @@ package dns // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/models.go index a95db4a29..a81f49b8e 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/models.go @@ -14,9 +14,8 @@ package dns // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -42,8 +41,7 @@ const ( Continue HTTPStatusCode = "Continue" // Created specifies the created state for http status code. 
Created HTTPStatusCode = "Created" - // ExpectationFailed specifies the expectation failed state for http status - // code. + // ExpectationFailed specifies the expectation failed state for http status code. ExpectationFailed HTTPStatusCode = "ExpectationFailed" // Forbidden specifies the forbidden state for http status code. Forbidden HTTPStatusCode = "Forbidden" @@ -53,29 +51,23 @@ const ( GatewayTimeout HTTPStatusCode = "GatewayTimeout" // Gone specifies the gone state for http status code. Gone HTTPStatusCode = "Gone" - // HTTPVersionNotSupported specifies the http version not supported state - // for http status code. + // HTTPVersionNotSupported specifies the http version not supported state for http status code. HTTPVersionNotSupported HTTPStatusCode = "HttpVersionNotSupported" - // InternalServerError specifies the internal server error state for http - // status code. + // InternalServerError specifies the internal server error state for http status code. InternalServerError HTTPStatusCode = "InternalServerError" // LengthRequired specifies the length required state for http status code. LengthRequired HTTPStatusCode = "LengthRequired" - // MethodNotAllowed specifies the method not allowed state for http status - // code. + // MethodNotAllowed specifies the method not allowed state for http status code. MethodNotAllowed HTTPStatusCode = "MethodNotAllowed" // Moved specifies the moved state for http status code. Moved HTTPStatusCode = "Moved" - // MovedPermanently specifies the moved permanently state for http status - // code. + // MovedPermanently specifies the moved permanently state for http status code. MovedPermanently HTTPStatusCode = "MovedPermanently" - // MultipleChoices specifies the multiple choices state for http status - // code. + // MultipleChoices specifies the multiple choices state for http status code. MultipleChoices HTTPStatusCode = "MultipleChoices" // NoContent specifies the no content state for http status code. NoContent HTTPStatusCode = "NoContent" - // NonAuthoritativeInformation specifies the non authoritative information - // state for http status code. + // NonAuthoritativeInformation specifies the non authoritative information state for http status code. NonAuthoritativeInformation HTTPStatusCode = "NonAuthoritativeInformation" // NotAcceptable specifies the not acceptable state for http status code. NotAcceptable HTTPStatusCode = "NotAcceptable" @@ -89,55 +81,43 @@ const ( OK HTTPStatusCode = "OK" // PartialContent specifies the partial content state for http status code. PartialContent HTTPStatusCode = "PartialContent" - // PaymentRequired specifies the payment required state for http status - // code. + // PaymentRequired specifies the payment required state for http status code. PaymentRequired HTTPStatusCode = "PaymentRequired" - // PreconditionFailed specifies the precondition failed state for http - // status code. + // PreconditionFailed specifies the precondition failed state for http status code. PreconditionFailed HTTPStatusCode = "PreconditionFailed" - // ProxyAuthenticationRequired specifies the proxy authentication required - // state for http status code. + // ProxyAuthenticationRequired specifies the proxy authentication required state for http status code. ProxyAuthenticationRequired HTTPStatusCode = "ProxyAuthenticationRequired" // Redirect specifies the redirect state for http status code. Redirect HTTPStatusCode = "Redirect" - // RedirectKeepVerb specifies the redirect keep verb state for http status - // code. 
+ // RedirectKeepVerb specifies the redirect keep verb state for http status code. RedirectKeepVerb HTTPStatusCode = "RedirectKeepVerb" // RedirectMethod specifies the redirect method state for http status code. RedirectMethod HTTPStatusCode = "RedirectMethod" - // RequestedRangeNotSatisfiable specifies the requested range not - // satisfiable state for http status code. + // RequestedRangeNotSatisfiable specifies the requested range not satisfiable state for http status code. RequestedRangeNotSatisfiable HTTPStatusCode = "RequestedRangeNotSatisfiable" - // RequestEntityTooLarge specifies the request entity too large state for - // http status code. + // RequestEntityTooLarge specifies the request entity too large state for http status code. RequestEntityTooLarge HTTPStatusCode = "RequestEntityTooLarge" // RequestTimeout specifies the request timeout state for http status code. RequestTimeout HTTPStatusCode = "RequestTimeout" - // RequestURITooLong specifies the request uri too long state for http - // status code. + // RequestURITooLong specifies the request uri too long state for http status code. RequestURITooLong HTTPStatusCode = "RequestUriTooLong" // ResetContent specifies the reset content state for http status code. ResetContent HTTPStatusCode = "ResetContent" // SeeOther specifies the see other state for http status code. SeeOther HTTPStatusCode = "SeeOther" - // ServiceUnavailable specifies the service unavailable state for http - // status code. + // ServiceUnavailable specifies the service unavailable state for http status code. ServiceUnavailable HTTPStatusCode = "ServiceUnavailable" - // SwitchingProtocols specifies the switching protocols state for http - // status code. + // SwitchingProtocols specifies the switching protocols state for http status code. SwitchingProtocols HTTPStatusCode = "SwitchingProtocols" - // TemporaryRedirect specifies the temporary redirect state for http status - // code. + // TemporaryRedirect specifies the temporary redirect state for http status code. TemporaryRedirect HTTPStatusCode = "TemporaryRedirect" // Unauthorized specifies the unauthorized state for http status code. Unauthorized HTTPStatusCode = "Unauthorized" - // UnsupportedMediaType specifies the unsupported media type state for http - // status code. + // UnsupportedMediaType specifies the unsupported media type state for http status code. UnsupportedMediaType HTTPStatusCode = "UnsupportedMediaType" // Unused specifies the unused state for http status code. Unused HTTPStatusCode = "Unused" - // UpgradeRequired specifies the upgrade required state for http status - // code. + // UpgradeRequired specifies the upgrade required state for http status code. UpgradeRequired HTTPStatusCode = "UpgradeRequired" // UseProxy specifies the use proxy state for http status code. UseProxy HTTPStatusCode = "UseProxy" @@ -223,8 +203,7 @@ type PtrRecord struct { Ptrdname *string `json:"ptrdname,omitempty"` } -// RecordSet is describes a DNS record set (a collection of DNS records with -// the same name and type). +// RecordSet is describes a DNS record set (a collection of DNS records with the same name and type). type RecordSet struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` @@ -253,8 +232,7 @@ func (client RecordSetListResult) RecordSetListResultPreparer() (*http.Request, autorest.WithBaseURL(to.String(client.NextLink))) } -// RecordSetProperties is represents the properties of the records in the -// record set. 
+// RecordSetProperties is represents the properties of the records in the record set. type RecordSetProperties struct { Metadata *map[string]*string `json:"metadata,omitempty"` TTL *int64 `json:"TTL,omitempty"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/recordsets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/recordsets.go index 240c42660..8d59929f7 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/recordsets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/recordsets.go @@ -14,9 +14,8 @@ package dns // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -34,35 +33,32 @@ func NewRecordSetsClient(subscriptionID string) RecordSetsClient { return NewRecordSetsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewRecordSetsClientWithBaseURI creates an instance of the RecordSetsClient -// client. +// NewRecordSetsClientWithBaseURI creates an instance of the RecordSetsClient client. func NewRecordSetsClientWithBaseURI(baseURI string, subscriptionID string) RecordSetsClient { return RecordSetsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate creates or updates a record set within a DNS zone. // -// resourceGroupName is the name of the resource group. zoneName is the name of -// the DNS zone (without a terminating dot). relativeRecordSetName is the name -// of the record set, relative to the name of the zone. recordType is the type -// of DNS record in this record set. Record sets of type SOA can be updated but -// not created (they are created when the DNS zone is created). parameters is -// parameters supplied to the CreateOrUpdate operation. ifMatch is the etag of -// the record set. Omit this value to always overwrite the current record set. -// Specify the last-seen etag value to prevent accidentally overwritting any -// concurrent changes. ifNoneMatch is set to '*' to allow a new record set to -// be created, but to prevent updating an existing record set. Other values -// will be ignored. +// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating +// dot). relativeRecordSetName is the name of the record set, relative to the name of the zone. recordType is the type +// of DNS record in this record set. Record sets of type SOA can be updated but not created (they are created when the +// DNS zone is created). parameters is parameters supplied to the CreateOrUpdate operation. ifMatch is the etag of the +// record set. Omit this value to always overwrite the current record set. Specify the last-seen etag value to prevent +// accidentally overwritting any concurrent changes. ifNoneMatch is set to '*' to allow a new record set to be created, +// but to prevent updating an existing record set. Other values will be ignored. 
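Reviewer note: the hunk above only reflows the CreateOrUpdate doc comment; the etag semantics are unchanged. Below is a minimal sketch of how a caller might drive them. The subscription ID and resource names are placeholders, the Etag and ARecords fields plus the adal credential flow in the comments are assumptions not shown in this diff, and error handling is collapsed to log.Fatal.

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/dns"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	client := dns.NewRecordSetsClient("<subscription-id>") // placeholder subscription ID
	// Credentials are typically wired through the newly vendored
	// autorest/adal package (assumed flow, details elided):
	//   oauth, _ := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID)
	//   spt, _ := adal.NewServicePrincipalToken(*oauth, clientID, secret, azure.PublicCloud.ResourceManagerEndpoint)
	//   client.Authorizer = autorest.NewBearerAuthorizer(spt)

	rs := dns.RecordSet{
		RecordSetProperties: &dns.RecordSetProperties{
			TTL:      to.Int64Ptr(300),
			ARecords: &[]dns.ARecord{{Ipv4Address: to.StringPtr("203.0.113.10")}}, // ARecords/Ipv4Address assumed from the wider model
		},
	}

	// ifNoneMatch="*": create only if the record set does not already exist;
	// ifMatch is empty, so no etag precondition is applied.
	created, err := client.CreateOrUpdate("my-rg", "example.com", "www", dns.A, rs, "", "*")
	if err != nil {
		log.Fatal(err)
	}

	// For a later update, send the last-seen etag as ifMatch so a concurrent
	// change fails the precondition instead of being silently overwritten.
	etag := ""
	if created.Etag != nil { // Etag field assumed; see the doc comment above
		etag = *created.Etag
	}
	if _, err := client.CreateOrUpdate("my-rg", "example.com", "www", dns.A, rs, etag, ""); err != nil {
		log.Fatal(err)
	}
}
```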
func (client RecordSetsClient) CreateOrUpdate(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string, ifNoneMatch string) (result RecordSet, err error) { req, err := client.CreateOrUpdatePreparer(resourceGroupName, zoneName, relativeRecordSetName, recordType, parameters, ifMatch, ifNoneMatch) if err != nil { - return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", nil, "Failure preparing request") + return } resp, err := client.CreateOrUpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", resp, "Failure sending request") + return } result, err = client.CreateOrUpdateResponder(resp) @@ -125,27 +121,25 @@ func (client RecordSetsClient) CreateOrUpdateResponder(resp *http.Response) (res return } -// Delete deletes a record set from a DNS zone. This operation cannot be -// undone. +// Delete deletes a record set from a DNS zone. This operation cannot be undone. // -// resourceGroupName is the name of the resource group. zoneName is the name of -// the DNS zone (without a terminating dot). relativeRecordSetName is the name -// of the record set, relative to the name of the zone. recordType is the type -// of DNS record in this record set. Record sets of type SOA cannot be deleted -// (they are deleted when the DNS zone is deleted). ifMatch is the etag of the -// record set. Omit this value to always delete the current record set. Specify -// the last-seen etag value to prevent accidentally deleting any concurrent -// changes. +// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating +// dot). relativeRecordSetName is the name of the record set, relative to the name of the zone. recordType is the type +// of DNS record in this record set. Record sets of type SOA cannot be deleted (they are deleted when the DNS zone is +// deleted). ifMatch is the etag of the record set. Omit this value to always delete the current record set. Specify +// the last-seen etag value to prevent accidentally deleting any concurrent changes. func (client RecordSetsClient) Delete(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, ifMatch string) (result autorest.Response, err error) { req, err := client.DeletePreparer(resourceGroupName, zoneName, relativeRecordSetName, recordType, ifMatch) if err != nil { - return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", nil, "Failure preparing request") + return } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", resp, "Failure sending request") + return } result, err = client.DeleteResponder(resp) @@ -203,20 +197,21 @@ func (client RecordSetsClient) DeleteResponder(resp *http.Response) (result auto // Get gets a record set. 
// -// resourceGroupName is the name of the resource group. zoneName is the name of -// the DNS zone (without a terminating dot). relativeRecordSetName is the name -// of the record set, relative to the name of the zone. recordType is the type +// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating +// dot). relativeRecordSetName is the name of the record set, relative to the name of the zone. recordType is the type // of DNS record in this record set. func (client RecordSetsClient) Get(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType) (result RecordSet, err error) { req, err := client.GetPreparer(resourceGroupName, zoneName, relativeRecordSetName, recordType) if err != nil { - return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -271,19 +266,23 @@ func (client RecordSetsClient) GetResponder(resp *http.Response) (result RecordS // ListByDNSZone lists all record sets in a DNS zone. // -// resourceGroupName is the name of the resource group. zoneName is the name of -// the DNS zone (without a terminating dot). top is the maximum number of -// record sets to return. If not specified, returns up to 100 record sets. -func (client RecordSetsClient) ListByDNSZone(resourceGroupName string, zoneName string, top *int32) (result RecordSetListResult, err error) { - req, err := client.ListByDNSZonePreparer(resourceGroupName, zoneName, top) +// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating +// dot). top is the maximum number of record sets to return. If not specified, returns up to 100 record sets. +// recordsetnamesuffix is the suffix label of the record set name that has to be used to filter the record set +// enumerations. If this parameter is specified, Enumeration will return only records that end with +// . 
+func (client RecordSetsClient) ListByDNSZone(resourceGroupName string, zoneName string, top *int32, recordsetnamesuffix string) (result RecordSetListResult, err error) { + req, err := client.ListByDNSZonePreparer(resourceGroupName, zoneName, top, recordsetnamesuffix) if err != nil { - return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", nil, "Failure preparing request") + return } resp, err := client.ListByDNSZoneSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", resp, "Failure sending request") + return } result, err = client.ListByDNSZoneResponder(resp) @@ -295,7 +294,7 @@ func (client RecordSetsClient) ListByDNSZone(resourceGroupName string, zoneName } // ListByDNSZonePreparer prepares the ListByDNSZone request. -func (client RecordSetsClient) ListByDNSZonePreparer(resourceGroupName string, zoneName string, top *int32) (*http.Request, error) { +func (client RecordSetsClient) ListByDNSZonePreparer(resourceGroupName string, zoneName string, top *int32, recordsetnamesuffix string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), @@ -309,6 +308,9 @@ func (client RecordSetsClient) ListByDNSZonePreparer(resourceGroupName string, z if top != nil { queryParameters["$top"] = autorest.Encode("query", *top) } + if len(recordsetnamesuffix) > 0 { + queryParameters["$recordsetnamesuffix"] = autorest.Encode("query", recordsetnamesuffix) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -361,22 +363,70 @@ func (client RecordSetsClient) ListByDNSZoneNextResults(lastResults RecordSetLis return } +// ListByDNSZoneComplete gets all elements from the list without paging. +func (client RecordSetsClient) ListByDNSZoneComplete(resourceGroupName string, zoneName string, top *int32, recordsetnamesuffix string, cancel <-chan struct{}) (<-chan RecordSet, <-chan error) { + resultChan := make(chan RecordSet) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListByDNSZone(resourceGroupName, zoneName, top, recordsetnamesuffix) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListByDNSZoneNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // ListByType lists the record sets of a specified type in a DNS zone. // -// resourceGroupName is the name of the resource group. zoneName is the name of -// the DNS zone (without a terminating dot). recordType is the type of record -// sets to enumerate. top is the maximum number of record sets to return. If -// not specified, returns up to 100 record sets. 
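Reviewer note: ListByDNSZone now takes a recordsetnamesuffix filter, and the generated ListByDNSZoneComplete helper walks NextLink pages into a channel. A consumption sketch, with placeholder resource names and credentials omitted:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/dns"
)

func main() {
	client := dns.NewRecordSetsClient("<subscription-id>") // placeholder subscription ID
	// client.Authorizer = ...                             // credentials omitted in this sketch

	cancel := make(chan struct{})
	defer close(cancel) // closing cancel lets the producing goroutine stop early

	// Enumerate every record set in the zone whose name ends with "api",
	// letting the SDK follow NextLink pages instead of paging by hand.
	results, errs := client.ListByDNSZoneComplete("my-rg", "example.com", nil, "api", cancel)

	for rs := range results {
		if rs.ID != nil {
			fmt.Println(*rs.ID)
		}
	}
	// errs is buffered and closed by the goroutine, so this receive yields
	// nil when the enumeration finished cleanly.
	if err := <-errs; err != nil {
		log.Fatal(err)
	}
}
```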
-func (client RecordSetsClient) ListByType(resourceGroupName string, zoneName string, recordType RecordType, top *int32) (result RecordSetListResult, err error) { - req, err := client.ListByTypePreparer(resourceGroupName, zoneName, recordType, top) +// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating +// dot). recordType is the type of record sets to enumerate. top is the maximum number of record sets to return. If not +// specified, returns up to 100 record sets. recordsetnamesuffix is the suffix label of the record set name that has to +// be used to filter the record set enumerations. If this parameter is specified, Enumeration will return only records +// that end with . +func (client RecordSetsClient) ListByType(resourceGroupName string, zoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (result RecordSetListResult, err error) { + req, err := client.ListByTypePreparer(resourceGroupName, zoneName, recordType, top, recordsetnamesuffix) if err != nil { - return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", nil, "Failure preparing request") + return } resp, err := client.ListByTypeSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure sending request") + return } result, err = client.ListByTypeResponder(resp) @@ -388,7 +438,7 @@ func (client RecordSetsClient) ListByType(resourceGroupName string, zoneName str } // ListByTypePreparer prepares the ListByType request. -func (client RecordSetsClient) ListByTypePreparer(resourceGroupName string, zoneName string, recordType RecordType, top *int32) (*http.Request, error) { +func (client RecordSetsClient) ListByTypePreparer(resourceGroupName string, zoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (*http.Request, error) { pathParameters := map[string]interface{}{ "recordType": autorest.Encode("path", recordType), "resourceGroupName": autorest.Encode("path", resourceGroupName), @@ -403,6 +453,9 @@ func (client RecordSetsClient) ListByTypePreparer(resourceGroupName string, zone if top != nil { queryParameters["$top"] = autorest.Encode("query", *top) } + if len(recordsetnamesuffix) > 0 { + queryParameters["$recordsetnamesuffix"] = autorest.Encode("query", recordsetnamesuffix) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -455,25 +508,70 @@ func (client RecordSetsClient) ListByTypeNextResults(lastResults RecordSetListRe return } +// ListByTypeComplete gets all elements from the list without paging. 
+func (client RecordSetsClient) ListByTypeComplete(resourceGroupName string, zoneName string, recordType RecordType, top *int32, recordsetnamesuffix string, cancel <-chan struct{}) (<-chan RecordSet, <-chan error) { + resultChan := make(chan RecordSet) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListByType(resourceGroupName, zoneName, recordType, top, recordsetnamesuffix) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListByTypeNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // Update updates a record set within a DNS zone. // -// resourceGroupName is the name of the resource group. zoneName is the name of -// the DNS zone (without a terminating dot). relativeRecordSetName is the name -// of the record set, relative to the name of the zone. recordType is the type -// of DNS record in this record set. parameters is parameters supplied to the -// Update operation. ifMatch is the etag of the record set. Omit this value to -// always overwrite the current record set. Specify the last-seen etag value to +// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating +// dot). relativeRecordSetName is the name of the record set, relative to the name of the zone. recordType is the type +// of DNS record in this record set. parameters is parameters supplied to the Update operation. ifMatch is the etag of +// the record set. Omit this value to always overwrite the current record set. Specify the last-seen etag value to // prevent accidentally overwritting concurrent changes. func (client RecordSetsClient) Update(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string) (result RecordSet, err error) { req, err := client.UpdatePreparer(resourceGroupName, zoneName, relativeRecordSetName, recordType, parameters, ifMatch) if err != nil { - return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", nil, "Failure preparing request") + return } resp, err := client.UpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", resp, "Failure sending request") + return } result, err = client.UpdateResponder(resp) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/version.go index 530508bf7..d5ade9afd 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/version.go @@ -14,16 +14,15 @@ package dns // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v9.0.0-beta arm-dns/2016-04-01" + return "Azure-SDK-For-Go/v11.0.0-beta arm-dns/2016-04-01" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return "v9.0.0-beta" + return "v11.0.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/zones.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/zones.go index e421f8d9c..33b230b03 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/zones.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/zones.go @@ -14,9 +14,8 @@ package dns // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -39,26 +38,25 @@ func NewZonesClientWithBaseURI(baseURI string, subscriptionID string) ZonesClien return ZonesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a DNS zone. Does not modify DNS records -// within the zone. +// CreateOrUpdate creates or updates a DNS zone. Does not modify DNS records within the zone. // -// resourceGroupName is the name of the resource group. zoneName is the name of -// the DNS zone (without a terminating dot). parameters is parameters supplied -// to the CreateOrUpdate operation. ifMatch is the etag of the DNS zone. Omit -// this value to always overwrite the current zone. Specify the last-seen etag -// value to prevent accidentally overwritting any concurrent changes. -// ifNoneMatch is set to '*' to allow a new DNS zone to be created, but to -// prevent updating an existing zone. Other values will be ignored. +// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating +// dot). parameters is parameters supplied to the CreateOrUpdate operation. ifMatch is the etag of the DNS zone. Omit +// this value to always overwrite the current zone. Specify the last-seen etag value to prevent accidentally +// overwritting any concurrent changes. ifNoneMatch is set to '*' to allow a new DNS zone to be created, but to prevent +// updating an existing zone. Other values will be ignored. 
func (client ZonesClient) CreateOrUpdate(resourceGroupName string, zoneName string, parameters Zone, ifMatch string, ifNoneMatch string) (result Zone, err error) { req, err := client.CreateOrUpdatePreparer(resourceGroupName, zoneName, parameters, ifMatch, ifNoneMatch) if err != nil { - return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", nil, "Failure preparing request") + return } resp, err := client.CreateOrUpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", resp, "Failure sending request") + return } result, err = client.CreateOrUpdateResponder(resp) @@ -119,35 +117,46 @@ func (client ZonesClient) CreateOrUpdateResponder(resp *http.Response) (result Z return } -// Delete deletes a DNS zone. WARNING: All DNS records in the zone will also be -// deleted. This operation cannot be undone. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete deletes a DNS zone. WARNING: All DNS records in the zone will also be deleted. This operation cannot be +// undone. This method may poll for completion. Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. zoneName is the name of -// the DNS zone (without a terminating dot). ifMatch is the etag of the DNS -// zone. Omit this value to always delete the current zone. Specify the -// last-seen etag value to prevent accidentally deleting any concurrent -// changes. -func (client ZonesClient) Delete(resourceGroupName string, zoneName string, ifMatch string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, zoneName, ifMatch, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", nil, "Failure preparing request") - } +// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating +// dot). ifMatch is the etag of the DNS zone. Omit this value to always delete the current zone. Specify the last-seen +// etag value to prevent accidentally deleting any concurrent changes. 
+func (client ZonesClient) Delete(resourceGroupName string, zoneName string, ifMatch string, cancel <-chan struct{}) (<-chan ZoneDeleteResult, <-chan error) { + resultChan := make(chan ZoneDeleteResult, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result ZoneDeleteResult + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, zoneName, ifMatch, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", nil, "Failure preparing request") + return + } - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", resp, "Failure sending request") - } + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", resp, "Failure sending request") + return + } - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", resp, "Failure responding to request") - } - - return + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -185,31 +194,33 @@ func (client ZonesClient) DeleteSender(req *http.Request) (*http.Response, error // DeleteResponder handles the response to the Delete request. The method always // closes the http.Response Body. -func (client ZonesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { +func (client ZonesClient) DeleteResponder(resp *http.Response) (result ZoneDeleteResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } -// Get gets a DNS zone. Retrieves the zone properties, but not the record sets -// within the zone. +// Get gets a DNS zone. Retrieves the zone properties, but not the record sets within the zone. // -// resourceGroupName is the name of the resource group. zoneName is the name of -// the DNS zone (without a terminating dot). +// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating +// dot). 
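Reviewer note: ZonesClient.Delete changes from a blocking call returning autorest.Response to a long-running call returning result and error channels, which is a breaking change for existing callers. A consumption sketch, assuming the usual NewZonesClient constructor and placeholder names:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/dns"
)

func main() {
	client := dns.NewZonesClient("<subscription-id>") // constructor assumed (not in this hunk); placeholder subscription ID
	// client.Authorizer = ...                        // credentials omitted in this sketch

	// Closing cancel from elsewhere would abort polling and any outstanding
	// HTTP request; here the operation is simply awaited.
	cancel := make(chan struct{})

	resultChan, errChan := client.Delete("my-rg", "example.com", "", cancel) // "" skips the etag precondition

	res := <-resultChan // ZoneDeleteResult; both channels are closed when the goroutine finishes
	if err := <-errChan; err != nil {
		log.Fatal(err)
	}
	_ = res.Response // the Response field carries the final HTTP response
}
```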
func (client ZonesClient) Get(resourceGroupName string, zoneName string) (result Zone, err error) { req, err := client.GetPreparer(resourceGroupName, zoneName) if err != nil { - return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -262,18 +273,19 @@ func (client ZonesClient) GetResponder(resp *http.Response) (result Zone, err er // List lists the DNS zones in all resource groups in a subscription. // -// top is the maximum number of DNS zones to return. If not specified, returns -// up to 100 zones. +// top is the maximum number of DNS zones to return. If not specified, returns up to 100 zones. func (client ZonesClient) List(top *int32) (result ZoneListResult, err error) { req, err := client.ListPreparer(top) if err != nil { - return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -349,21 +361,67 @@ func (client ZonesClient) ListNextResults(lastResults ZoneListResult) (result Zo return } +// ListComplete gets all elements from the list without paging. +func (client ZonesClient) ListComplete(top *int32, cancel <-chan struct{}) (<-chan Zone, <-chan error) { + resultChan := make(chan Zone) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(top) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // ListByResourceGroup lists the DNS zones within a resource group. // -// resourceGroupName is the name of the resource group. top is the maximum -// number of record sets to return. If not specified, returns up to 100 record -// sets. +// resourceGroupName is the name of the resource group. top is the maximum number of record sets to return. If not +// specified, returns up to 100 record sets. 
func (client ZonesClient) ListByResourceGroup(resourceGroupName string, top *int32) (result ZoneListResult, err error) { req, err := client.ListByResourceGroupPreparer(resourceGroupName, top) if err != nil { - return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", nil, "Failure preparing request") + return } resp, err := client.ListByResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", resp, "Failure sending request") + return } result, err = client.ListByResourceGroupResponder(resp) @@ -439,3 +497,48 @@ func (client ZonesClient) ListByResourceGroupNextResults(lastResults ZoneListRes return } + +// ListByResourceGroupComplete gets all elements from the list without paging. +func (client ZonesClient) ListByResourceGroupComplete(resourceGroupName string, top *int32, cancel <-chan struct{}) (<-chan Zone, <-chan error) { + resultChan := make(chan Zone) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListByResourceGroup(resourceGroupName, top) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListByResourceGroupNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go index 1bd6057da..bcbe00d0c 100644 --- a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go +++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -5,7 +5,7 @@ type csiEntryState struct { } func (csiState csiEntryState) Handle(b byte) (s state, e error) { - logger.Infof("CsiEntry::Handle %#x", b) + csiState.parser.logf("CsiEntry::Handle %#x", b) nextState, err := csiState.baseState.Handle(b) if nextState != nil || err != nil { @@ -25,7 +25,7 @@ func (csiState csiEntryState) Handle(b byte) (s state, e error) { } func (csiState csiEntryState) Transition(s state) error { - logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) csiState.baseState.Transition(s) switch s { diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go index 4be35c5fd..7ed5e01c3 100644 --- a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go +++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -5,7 +5,7 @@ type csiParamState struct { } func (csiState csiParamState) Handle(b byte) (s state, e error) { - logger.Infof("CsiParam::Handle %#x", b) + csiState.parser.logf("CsiParam::Handle %#x", b) nextState, err := csiState.baseState.Handle(b) if nextState != nil || err != nil { @@ -26,7 +26,7 @@ func (csiState csiParamState) Handle(b byte) (s state, e 
error) { } func (csiState csiParamState) Transition(s state) error { - logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) csiState.baseState.Transition(s) switch s { diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go index 2189eb6b6..1c719db9e 100644 --- a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -5,7 +5,7 @@ type escapeIntermediateState struct { } func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - logger.Infof("escapeIntermediateState::Handle %#x", b) + escState.parser.logf("escapeIntermediateState::Handle %#x", b) nextState, err := escState.baseState.Handle(b) if nextState != nil || err != nil { return nextState, err @@ -24,7 +24,7 @@ func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { } func (escState escapeIntermediateState) Transition(s state) error { - logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) escState.baseState.Transition(s) switch s { diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go index 7b1b9ad3f..6390abd23 100644 --- a/vendor/github.com/Azure/go-ansiterm/escape_state.go +++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -5,7 +5,7 @@ type escapeState struct { } func (escState escapeState) Handle(b byte) (s state, e error) { - logger.Infof("escapeState::Handle %#x", b) + escState.parser.logf("escapeState::Handle %#x", b) nextState, err := escState.baseState.Handle(b) if nextState != nil || err != nil { return nextState, err @@ -28,7 +28,7 @@ func (escState escapeState) Handle(b byte) (s state, e error) { } func (escState escapeState) Transition(s state) error { - logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name()) + escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) escState.baseState.Transition(s) switch s { diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go index 24062d420..593b10ab6 100644 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -5,7 +5,7 @@ type oscStringState struct { } func (oscState oscStringState) Handle(b byte) (s state, e error) { - logger.Infof("OscString::Handle %#x", b) + oscState.parser.logf("OscString::Handle %#x", b) nextState, err := oscState.baseState.Handle(b) if nextState != nil || err != nil { return nextState, err diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go index 3286a9cb5..03cec7ada 100644 --- a/vendor/github.com/Azure/go-ansiterm/parser.go +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -2,14 +2,10 @@ package ansiterm import ( "errors" - "io/ioutil" + "log" "os" - - "github.com/sirupsen/logrus" ) -var logger *logrus.Logger - type AnsiParser struct { currState state eventHandler AnsiEventHandler @@ -23,50 +19,69 @@ type AnsiParser struct { ground state oscString state stateMap []state + + logf func(string, ...interface{}) } -func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser { - logFile 
:= ioutil.Discard +type Option func(*AnsiParser) - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiParser.log") +func WithLogf(f func(string, ...interface{})) Option { + return func(ap *AnsiParser) { + ap.logf = f } +} - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.InfoLevel, - } - - parser := &AnsiParser{ +func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { + ap := &AnsiParser{ eventHandler: evtHandler, context: &ansiContext{}, } - - parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}} - parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}} - parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}} - parser.escape = escapeState{baseState{name: "Escape", parser: parser}} - parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}} - parser.error = errorState{baseState{name: "Error", parser: parser}} - parser.ground = groundState{baseState{name: "Ground", parser: parser}} - parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}} - - parser.stateMap = []state{ - parser.csiEntry, - parser.csiParam, - parser.dcsEntry, - parser.escape, - parser.escapeIntermediate, - parser.error, - parser.ground, - parser.oscString, + for _, o := range opts { + o(ap) } - parser.currState = getState(initialState, parser.stateMap) + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("ansiParser.log") + logger := log.New(logFile, "", log.LstdFlags) + if ap.logf != nil { + l := ap.logf + ap.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) + } + } else { + ap.logf = logger.Printf + } + } - logger.Infof("CreateParser: parser %p", parser) - return parser + if ap.logf == nil { + ap.logf = func(string, ...interface{}) {} + } + + ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} + ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} + ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} + ap.escape = escapeState{baseState{name: "Escape", parser: ap}} + ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} + ap.error = errorState{baseState{name: "Error", parser: ap}} + ap.ground = groundState{baseState{name: "Ground", parser: ap}} + ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} + + ap.stateMap = []state{ + ap.csiEntry, + ap.csiParam, + ap.dcsEntry, + ap.escape, + ap.escapeIntermediate, + ap.error, + ap.ground, + ap.oscString, + } + + ap.currState = getState(initialState, ap.stateMap) + + ap.logf("CreateParser: parser %p", ap) + return ap } func getState(name string, states []state) state { @@ -97,7 +112,7 @@ func (ap *AnsiParser) handle(b byte) error { } if newState == nil { - logger.Warning("newState is nil") + ap.logf("WARNING: newState is nil") return errors.New("New state of 'nil' is invalid.") } @@ -111,23 +126,23 @@ func (ap *AnsiParser) handle(b byte) error { } func (ap *AnsiParser) changeState(newState state) error { - logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) + ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) // Exit old state if err := ap.currState.Exit(); err != nil { - logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) + ap.logf("Exit state '%s' failed with : '%v'", 
ap.currState.Name(), err) return err } // Perform transition action if err := ap.currState.Transition(newState); err != nil { - logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) + ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) return err } // Enter new state if err := newState.Enter(); err != nil { - logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err) + ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) return err } diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go index 8b69a67a5..de0a1f9cd 100644 --- a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go @@ -27,7 +27,6 @@ func parseParams(bytes []byte) ([]string, error) { params = append(params, s) } - logger.Infof("Parsed params: %v with length: %d", params, len(params)) return params, nil } @@ -37,7 +36,6 @@ func parseCmd(context ansiContext) (string, error) { func getInt(params []string, dflt int) int { i := getInts(params, 1, dflt)[0] - logger.Infof("getInt: %v", i) return i } @@ -60,8 +58,6 @@ func getInts(params []string, minCount int, dflt int) []int { } } - logger.Infof("getInts: %v", ints) - return ints } diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go index 58750a2d2..0bb5e51e9 100644 --- a/vendor/github.com/Azure/go-ansiterm/parser_actions.go +++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go @@ -1,19 +1,15 @@ package ansiterm -import ( - "fmt" -) - func (ap *AnsiParser) collectParam() error { currChar := ap.context.currentChar - logger.Infof("collectParam %#x", currChar) + ap.logf("collectParam %#x", currChar) ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) return nil } func (ap *AnsiParser) collectInter() error { currChar := ap.context.currentChar - logger.Infof("collectInter %#x", currChar) + ap.logf("collectInter %#x", currChar) ap.context.paramBuffer = append(ap.context.interBuffer, currChar) return nil } @@ -21,8 +17,8 @@ func (ap *AnsiParser) collectInter() error { func (ap *AnsiParser) escDispatch() error { cmd, _ := parseCmd(*ap.context) intermeds := ap.context.interBuffer - logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar) - logger.Infof("escDispatch: %v(%v)", cmd, intermeds) + ap.logf("escDispatch currentChar: %#x", ap.context.currentChar) + ap.logf("escDispatch: %v(%v)", cmd, intermeds) switch cmd { case "D": // IND @@ -43,8 +39,9 @@ func (ap *AnsiParser) escDispatch() error { func (ap *AnsiParser) csiDispatch() error { cmd, _ := parseCmd(*ap.context) params, _ := parseParams(ap.context.paramBuffer) + ap.logf("Parsed params: %v with length: %d", params, len(params)) - logger.Infof("csiDispatch: %v(%v)", cmd, params) + ap.logf("csiDispatch: %v(%v)", cmd, params) switch cmd { case "@": @@ -102,7 +99,7 @@ func (ap *AnsiParser) csiDispatch() error { top, bottom := ints[0], ints[1] return ap.eventHandler.DECSTBM(top, bottom) default: - logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)) + ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) return nil } diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go index daf2f0696..a67327972 100644 --- 
a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -175,7 +175,7 @@ func GetStdFile(nFile int) (*os.File, uintptr) { fd, err := syscall.GetStdHandle(nFile) if err != nil { - panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err)) + panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) } return file, uintptr(fd) diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go index 462d92f8e..6055e33b9 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/api.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go @@ -49,17 +49,22 @@ var ( const ( // Console modes // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. - ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_WINDOW_INPUT = 0x0008 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_EXTENDED_FLAGS = 0x0080 + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_WINDOW_INPUT = 0x0008 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_EXTENDED_FLAGS = 0x0080 + ENABLE_AUTO_POSITION = 0x0100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + DISABLE_NEWLINE_AUTO_RETURN = 0x0008 + ENABLE_LVB_GRID_WORLDWIDE = 0x0010 // Character attributes // Note: diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go index f015723ad..3ee06ea72 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -34,7 +34,7 @@ func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL if err != nil { return err } - logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y) + h.logf("Cursor position set: (%d, %d)", position.X, position.Y) return err } diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go index 706d27057..2d27fa1d0 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go @@ -50,8 +50,8 @@ func (h *windowsAnsiEventHandler) insertLines(param int) error { // scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { - logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) - logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) + h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) + h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) // Copy from and clip to the scroll region (full buffer width) scrollRect := SMALL_RECT{ diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go index 48998bb05..2d40fb75a 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -4,16 +4,13 @@ package winterm import ( "bytes" - "io/ioutil" + "log" "os" "strconv" "github.com/Azure/go-ansiterm" - "github.com/sirupsen/logrus" ) -var logger *logrus.Logger - type windowsAnsiEventHandler struct { fd uintptr file *os.File @@ -28,32 +25,52 @@ type windowsAnsiEventHandler struct { marginByte byte curInfo *CONSOLE_SCREEN_BUFFER_INFO curPos COORD + logf func(string, ...interface{}) } -func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler { - logFile := ioutil.Discard +type Option func(*windowsAnsiEventHandler) - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("winEventHandler.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, +func WithLogf(f func(string, ...interface{})) Option { + return func(w *windowsAnsiEventHandler) { + w.logf = f } +} +func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { infoReset, err := GetConsoleScreenBufferInfo(fd) if err != nil { return nil } - return &windowsAnsiEventHandler{ + h := &windowsAnsiEventHandler{ fd: fd, file: file, infoReset: infoReset, attributes: infoReset.Attributes, } + for _, o := range opts { + o(h) + } + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("winEventHandler.log") + logger := log.New(logFile, "", log.LstdFlags) + if h.logf != nil { + l := h.logf + h.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) + } + } else { + h.logf = logger.Printf + } + } + + if h.logf == nil { + h.logf = func(string, ...interface{}) {} + } + + return h } type scrollRegion struct { @@ -96,7 +113,7 @@ func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { if err := h.Flush(); err != nil { return false, err } - logger.Info("Simulating LF inside scroll region") + h.logf("Simulating LF inside scroll region") if err := h.scrollUp(1); err != nil { return false, err } @@ -119,7 +136,7 @@ func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { } else { // The cursor is at the bottom of the screen but outside the scroll // region. Skip the LF. 
- logger.Info("Simulating LF outside scroll region") + h.logf("Simulating LF outside scroll region") if includeCR { if err := h.Flush(); err != nil { return false, err @@ -151,7 +168,7 @@ func (h *windowsAnsiEventHandler) executeLF() error { if err := h.Flush(); err != nil { return err } - logger.Info("Resetting cursor position for LF without CR") + h.logf("Resetting cursor position for LF without CR") if err := SetConsoleCursorPosition(h.fd, pos); err != nil { return err } @@ -186,7 +203,7 @@ func (h *windowsAnsiEventHandler) Print(b byte) error { func (h *windowsAnsiEventHandler) Execute(b byte) error { switch b { case ansiterm.ANSI_TAB: - logger.Info("Execute(TAB)") + h.logf("Execute(TAB)") // Move to the next tab stop, but preserve auto-wrap if already set. if !h.wrapNext { pos, info, err := h.getCurrentInfo() @@ -269,7 +286,7 @@ func (h *windowsAnsiEventHandler) CUU(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)}) + h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorVertical(-param) } @@ -278,7 +295,7 @@ func (h *windowsAnsiEventHandler) CUD(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)}) + h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorVertical(param) } @@ -287,7 +304,7 @@ func (h *windowsAnsiEventHandler) CUF(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)}) + h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorHorizontal(param) } @@ -296,7 +313,7 @@ func (h *windowsAnsiEventHandler) CUB(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)}) + h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorHorizontal(-param) } @@ -305,7 +322,7 @@ func (h *windowsAnsiEventHandler) CNL(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)}) + h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorLine(param) } @@ -314,7 +331,7 @@ func (h *windowsAnsiEventHandler) CPL(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)}) + h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorLine(-param) } @@ -323,7 +340,7 @@ func (h *windowsAnsiEventHandler) CHA(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)}) + h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorColumn(param) } @@ -332,7 +349,7 @@ func (h *windowsAnsiEventHandler) VPA(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("VPA: [[%d]]", param) + h.logf("VPA: [[%d]]", param) h.clearWrap() info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { @@ -348,7 +365,7 @@ func (h *windowsAnsiEventHandler) CUP(row int, col int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CUP: [[%d %d]]", row, col) + h.logf("CUP: [[%d %d]]", row, col) h.clearWrap() info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { @@ -364,7 +381,7 @@ func (h *windowsAnsiEventHandler) HVP(row int, col int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("HVP: [[%d %d]]", 
row, col) + h.logf("HVP: [[%d %d]]", row, col) h.clearWrap() return h.CUP(row, col) } @@ -373,7 +390,7 @@ func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { if err := h.Flush(); err != nil { return err } - logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) + h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) h.clearWrap() return nil } @@ -382,7 +399,7 @@ func (h *windowsAnsiEventHandler) DECOM(enable bool) error { if err := h.Flush(); err != nil { return err } - logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)}) + h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) h.clearWrap() h.originMode = enable return h.CUP(1, 1) @@ -392,7 +409,7 @@ func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { if err := h.Flush(); err != nil { return err } - logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) + h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) h.clearWrap() if err := h.ED(2); err != nil { return err @@ -407,7 +424,7 @@ func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { } if info.Size.X < targetWidth { if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - logger.Info("set buffer failed:", err) + h.logf("set buffer failed: %v", err) return err } } @@ -415,12 +432,12 @@ func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { window.Left = 0 window.Right = targetWidth - 1 if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - logger.Info("set window failed:", err) + h.logf("set window failed: %v", err) return err } if info.Size.X > targetWidth { if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - logger.Info("set buffer failed:", err) + h.logf("set buffer failed: %v", err) return err } } @@ -431,7 +448,7 @@ func (h *windowsAnsiEventHandler) ED(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("ED: [%v]", []string{strconv.Itoa(param)}) + h.logf("ED: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() // [J -- Erases from the cursor to the end of the screen, including the cursor position. @@ -490,7 +507,7 @@ func (h *windowsAnsiEventHandler) EL(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("EL: [%v]", strconv.Itoa(param)) + h.logf("EL: [%v]", strconv.Itoa(param)) h.clearWrap() // [K -- Erases from the cursor to the end of the line, including the cursor position. 
@@ -531,7 +548,7 @@ func (h *windowsAnsiEventHandler) IL(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("IL: [%v]", strconv.Itoa(param)) + h.logf("IL: [%v]", strconv.Itoa(param)) h.clearWrap() return h.insertLines(param) } @@ -540,7 +557,7 @@ func (h *windowsAnsiEventHandler) DL(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("DL: [%v]", strconv.Itoa(param)) + h.logf("DL: [%v]", strconv.Itoa(param)) h.clearWrap() return h.deleteLines(param) } @@ -549,7 +566,7 @@ func (h *windowsAnsiEventHandler) ICH(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("ICH: [%v]", strconv.Itoa(param)) + h.logf("ICH: [%v]", strconv.Itoa(param)) h.clearWrap() return h.insertCharacters(param) } @@ -558,7 +575,7 @@ func (h *windowsAnsiEventHandler) DCH(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("DCH: [%v]", strconv.Itoa(param)) + h.logf("DCH: [%v]", strconv.Itoa(param)) h.clearWrap() return h.deleteCharacters(param) } @@ -572,7 +589,7 @@ func (h *windowsAnsiEventHandler) SGR(params []int) error { strings = append(strings, strconv.Itoa(v)) } - logger.Infof("SGR: [%v]", strings) + h.logf("SGR: [%v]", strings) if len(params) <= 0 { h.attributes = h.infoReset.Attributes @@ -606,7 +623,7 @@ func (h *windowsAnsiEventHandler) SU(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("SU: [%v]", []string{strconv.Itoa(param)}) + h.logf("SU: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.scrollUp(param) } @@ -615,13 +632,13 @@ func (h *windowsAnsiEventHandler) SD(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("SD: [%v]", []string{strconv.Itoa(param)}) + h.logf("SD: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.scrollDown(param) } func (h *windowsAnsiEventHandler) DA(params []string) error { - logger.Infof("DA: [%v]", params) + h.logf("DA: [%v]", params) // DA cannot be implemented because it must send data on the VT100 input stream, // which is not available to go-ansiterm. 
return nil @@ -631,7 +648,7 @@ func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("DECSTBM: [%d, %d]", top, bottom) + h.logf("DECSTBM: [%d, %d]", top, bottom) // Windows is 0 indexed, Linux is 1 indexed h.sr.top = int16(top - 1) @@ -646,7 +663,7 @@ func (h *windowsAnsiEventHandler) RI() error { if err := h.Flush(); err != nil { return err } - logger.Info("RI: []") + h.logf("RI: []") h.clearWrap() info, err := GetConsoleScreenBufferInfo(h.fd) @@ -663,21 +680,21 @@ func (h *windowsAnsiEventHandler) RI() error { } func (h *windowsAnsiEventHandler) IND() error { - logger.Info("IND: []") + h.logf("IND: []") return h.executeLF() } func (h *windowsAnsiEventHandler) Flush() error { h.curInfo = nil if h.buffer.Len() > 0 { - logger.Infof("Flush: [%s]", h.buffer.Bytes()) + h.logf("Flush: [%s]", h.buffer.Bytes()) if _, err := h.buffer.WriteTo(h.file); err != nil { return err } } if h.wrapNext && !h.drewMarginByte { - logger.Infof("Flush: drawing margin byte '%c'", h.marginByte) + h.logf("Flush: drawing margin byte '%c'", h.marginByte) info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go new file mode 100644 index 000000000..12375e0e4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -0,0 +1,51 @@ +package adal + +import ( + "fmt" + "net/url" +) + +const ( + activeDirectoryAPIVersion = "1.0" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorityEndpoint url.URL + AuthorizeEndpoint url.URL + TokenEndpoint url.URL + DeviceCodeEndpoint url.URL +} + +// NewOAuthConfig returns an OAuthConfig with tenant specific urls +func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" + u, err := url.Parse(activeDirectoryEndpoint) + if err != nil { + return nil, err + } + authorityURL, err := u.Parse(tenantID) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorityEndpoint: *authorityURL, + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go similarity index 64% rename from vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go index e1d5498a8..6c511f8c8 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -1,4 +1,4 @@ -package azure +package adal /* This file is largely based on rjw57/oauth2device's code, with the follow differences: @@ -10,16 +10,17 @@ package azure */ import ( + "encoding/json" "fmt" + 
"io/ioutil" "net/http" "net/url" + "strings" "time" - - "github.com/Azure/go-autorest/autorest" ) const ( - logPrefix = "autorest/azure/devicetoken:" + logPrefix = "autorest/adal/devicetoken:" ) var ( @@ -38,10 +39,17 @@ var ( // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow + ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) + + // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow + ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" errTokenSendingFails = "Error occurred while sending request with device code for a token" errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" + errStatusNotOK = "Error HTTP status != 200" ) // DeviceCode is the object returned by the device auth endpoint @@ -79,31 +87,45 @@ type deviceToken struct { // InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode // that can be used with CheckForUserCompletion or WaitForUserCompletion. -func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - req, _ := autorest.Prepare( - &http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(oauthConfig.DeviceCodeEndpoint.String()), - autorest.WithFormData(url.Values{ - "client_id": []string{clientID}, - "resource": []string{resource}, - }), - ) +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } - resp, err := autorest.SendWithSender(client, req) + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err) + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty } var code DeviceCode - err = autorest.Respond( - resp, - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&code), - autorest.ByClosing()) + err = json.Unmarshal(rb, &code) if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err) + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) } code.ClientID = clientID @@ 
-115,33 +137,46 @@ func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, client // CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint // to see if the device flow has: been completed, timed out, or otherwise failed -func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { - req, _ := autorest.Prepare( - &http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(code.OAuthConfig.TokenEndpoint.String()), - autorest.WithFormData(url.Values{ - "client_id": []string{code.ClientID}, - "code": []string{*code.DeviceCode}, - "grant_type": []string{OAuthGrantTypeDeviceCode}, - "resource": []string{code.Resource}, - }), - ) +func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } - resp, err := autorest.SendWithSender(client, req) + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err) + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty } var token deviceToken - err = autorest.Respond( - resp, - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), - autorest.ByUnmarshallingJSON(&token), - autorest.ByClosing()) + err = json.Unmarshal(rb, &token) if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err) + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) } if token.Error == nil { @@ -164,12 +199,12 @@ func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, // WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. // This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. 
-func WaitForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { intervalDuration := time.Duration(*code.Interval) * time.Second waitDuration := intervalDuration for { - token, err := CheckForUserCompletion(client, code) + token, err := CheckForUserCompletion(sender, code) if err == nil { return token, nil diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go new file mode 100644 index 000000000..e87911e83 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go @@ -0,0 +1,6 @@ +// +build !windows + +package adal + +// msiPath is the path to the MSI Extension settings file (to discover the endpoint) +var msiPath = "/var/lib/waagent/ManagedIdentity-Settings" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go new file mode 100644 index 000000000..80f800432 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go @@ -0,0 +1,11 @@ +// +build windows + +package adal + +import ( + "os" + "strings" +) + +// msiPath is the path to the MSI Extension settings file (to discover the endpoint) +var msiPath = strings.Join([]string{os.Getenv("SystemDrive"), "WindowsAzure/Config/ManagedIdentity-Settings"}, "/") diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go similarity index 99% rename from vendor/github.com/Azure/go-autorest/autorest/azure/persist.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/persist.go index d5cf62ddc..73711c667 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -1,4 +1,4 @@ -package azure +package adal import ( "encoding/json" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go new file mode 100644 index 000000000..7928c971a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -0,0 +1,46 @@ +package adal + +import ( + "net/http" +) + +const ( + contentType = "Content-Type" + mimeTypeFormPost = "application/x-www-form-urlencoded" +) + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(&http.Client{}, decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to +// the Sender. 
Decorators are applied in the order received, but their affect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). +func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go similarity index 68% rename from vendor/github.com/Azure/go-autorest/autorest/azure/token.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/token.go index cfcd03011..2ac8c3c22 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -1,4 +1,4 @@ -package azure +package adal import ( "crypto/rand" @@ -6,19 +6,21 @@ import ( "crypto/sha1" "crypto/x509" "encoding/base64" + "encoding/json" "fmt" + "io/ioutil" "net/http" "net/url" "strconv" + "strings" "time" - "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" "github.com/dgrijalva/jwt-go" ) const ( defaultRefresh = 5 * time.Minute - tokenBaseDate = "1970-01-01T00:00:00Z" // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow OAuthGrantTypeDeviceCode = "device_code" @@ -28,12 +30,21 @@ const ( // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows OAuthGrantTypeRefreshToken = "refresh_token" + + // metadataHeader is the header required by MSI extension + metadataHeader = "Metadata" ) -var expirationBase time.Time +// OAuthTokenProvider is an interface which should be implemented by an access token retriever +type OAuthTokenProvider interface { + OAuthToken() string +} -func init() { - expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate) +// Refresher is an interface for token refresh functionality +type Refresher interface { + Refresh() error + RefreshExchange(resource string) error + EnsureFresh() error } // TokenRefreshCallback is the type representing callbacks that will be called after @@ -59,7 +70,10 @@ func (t Token) Expires() time.Time { if err != nil { s = -3600 } - return expirationBase.Add(time.Duration(s) * time.Second).UTC() + + expiration := date.NewUnixTimeFromSeconds(float64(s)) + + return time.Time(expiration).UTC() } // IsExpired returns true if the Token is expired, false otherwise. @@ -73,14 +87,9 @@ func (t Token) WillExpireIn(d time.Duration) bool { return !t.Expires().After(time.Now().Add(d)) } -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the AccessToken of the Token. -func (t *Token) WithAuthorization() autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r) - }) - } +//OAuthToken return the current access token +func (t *Token) OAuthToken() string { + return t.AccessToken } // ServicePrincipalNoSecret represents a secret type that contains no secret @@ -118,6 +127,15 @@ type ServicePrincipalCertificateSecret struct { PrivateKey *rsa.PrivateKey } +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. 
+type ServicePrincipalMSISecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + // SignJwt returns the JWT signed with the certificate's private key. func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { hasher := sha1.New() @@ -173,7 +191,7 @@ type ServicePrincipalToken struct { resource string autoRefresh bool refreshWithin time.Duration - sender autorest.Sender + sender Sender refreshCallbacks []TokenRefreshCallback } @@ -238,10 +256,58 @@ func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID s ) } +// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. +func GetMSIVMEndpoint() (string, error) { + return getMSIVMEndpoint(msiPath) +} + +func getMSIVMEndpoint(path string) (string, error) { + // Read MSI settings + bytes, err := ioutil.ReadFile(path) + if err != nil { + return "", err + } + msiSettings := struct { + URL string `json:"url"` + }{} + err = json.Unmarshal(bytes, &msiSettings) + if err != nil { + return "", err + } + + return msiSettings.URL, nil +} + +// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. +func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + // We set the oauth config token endpoint to be MSI's endpoint + msiEndpointURL, err := url.Parse(msiEndpoint) + if err != nil { + return nil, err + } + + oauthConfig, err := NewOAuthConfig(msiEndpointURL.String(), "") + if err != nil { + return nil, err + } + + spt := &ServicePrincipalToken{ + oauthConfig: *oauthConfig, + secret: &ServicePrincipalMSISecret{}, + resource: resource, + autoRefresh: true, + refreshWithin: defaultRefresh, + sender: &http.Client{}, + refreshCallbacks: callbacks, + } + + return spt, nil +} + // EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin). +// RefreshWithin) and autoRefresh flag is on. func (spt *ServicePrincipalToken) EnsureFresh() error { - if spt.WillExpireIn(spt.refreshWithin) { + if spt.autoRefresh && spt.WillExpireIn(spt.refreshWithin) { return spt.Refresh() } return nil @@ -253,8 +319,7 @@ func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { for _, callback := range spt.refreshCallbacks { err := callback(spt.Token) if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "InvokeRefreshCallbacks", nil, "A TokenRefreshCallback handler returned an error") + return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) } } } @@ -287,39 +352,48 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error { } } - req, _ := autorest.Prepare(&http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(spt.oauthConfig.TokenEndpoint.String()), - autorest.WithFormData(v)) - - resp, err := autorest.SendWithSender(spt.sender, req) + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), body) if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "Refresh", resp, "Failure sending request for Service Principal %s", - spt.clientID) + return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) } - var newToken Token - err = autorest.Respond(resp, - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&newToken), - autorest.ByClosing()) + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok { + req.Header.Set(metadataHeader, "true") + } + resp, err := spt.sender.Do(req) if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "Refresh", resp, "Failure handling response to Service Principal %s request", - spt.clientID) + return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) } - spt.Token = newToken + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) - err = spt.InvokeRefreshCallbacks(newToken) - if err != nil { - // its already wrapped inside InvokeRefreshCallbacks - return err + if resp.StatusCode != http.StatusOK { + if err != nil { + return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode) + } + return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)) } - return nil + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + var token Token + err = json.Unmarshal(rb, &token) + if err != nil { + return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) + } + + spt.Token = token + + return spt.InvokeRefreshCallbacks(token) } // SetAutoRefresh enables or disables automatic refreshing of stale tokens. @@ -334,30 +408,6 @@ func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { return } -// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An +// SetSender sets the http.Client used when obtaining the Service Principal token. An // undecorated http.Client is used by default. -func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) { - spt.sender = s -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken. -// -// By default, the token will automatically refresh if nearly expired (as determined by the -// RefreshWithin interval). Use the AutoRefresh method to enable or disable automatically refreshing -// tokens. 
-func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - if spt.autoRefresh { - err := spt.EnsureFresh() - if err != nil { - return r, autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "WithAuthorization", nil, "Failed to refresh Service Principal Token for request to %s", - r.URL) - } - } - return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r) - }) - } -} +func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 000000000..314ed7876 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,167 @@ +package autorest + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +const ( + bearerChallengeHeader = "Www-Authenticate" + bearer = "Bearer" + tenantID = "tenantID" +) + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. +type Authorizer interface { + WithAuthorization() PrepareDecorator +} + +// NullAuthorizer implements a default, "do nothing" Authorizer. +type NullAuthorizer struct{} + +// WithAuthorization returns a PrepareDecorator that does nothing. +func (na NullAuthorizer) WithAuthorization() PrepareDecorator { + return WithNothing() +} + +// BearerAuthorizer implements the bearer authorization +type BearerAuthorizer struct { + tokenProvider adal.OAuthTokenProvider +} + +// NewBearerAuthorizer crates a BearerAuthorizer using the given token provider +func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { + return &BearerAuthorizer{tokenProvider: tp} +} + +func (ba *BearerAuthorizer) withBearerAuthorization() PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())) +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the token. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + refresher, ok := ba.tokenProvider.(adal.Refresher) + if ok { + err := refresher.EnsureFresh() + if err != nil { + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", nil, + "Failed to refresh the Token for request to %s", r.URL) + } + } + return (ba.withBearerAuthorization()(p)).Prepare(r) + }) + } +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. 
+func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if sender == nil { + sender = &http.Client{} + } + return &BearerAuthorizerCallback{sender: sender, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. + rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err == nil && resp.StatusCode == 401 { + defer resp.Body.Close() + if hasBearerChallenge(resp) { + bc, err := newBearerChallenge(resp) + if err != nil { + return r, err + } + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return ba.WithAuthorization()(p).Prepare(r) + } + } + } + return r, err + }) + } +} + +// returns true if the HTTP response contains a bearer challenge +func hasBearerChallenge(resp *http.Response) bool { + authHeader := resp.Header.Get(bearerChallengeHeader) + if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { + return false + } + return true +} + +type bearerChallenge struct { + values map[string]string +} + +func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { + challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) + trimmedChallenge := challenge[len(bearer)+1:] + + // challenge is a set of key=value pairs that are comma delimited + pairs := strings.Split(trimmedChallenge, ",") + if len(pairs) < 1 { + err = fmt.Errorf("challenge '%s' contains no pairs", challenge) + return bc, err + } + + bc.values = make(map[string]string) + for i := range pairs { + trimmedPair := strings.TrimSpace(pairs[i]) + pair := strings.Split(trimmedPair, "=") + if len(pair) == 2 { + // remove the enclosing quotes + key := strings.Trim(pair[0], "\"") + value := strings.Trim(pair[1], "\"") + + switch key { + case "authorization", "authorization_uri": + // strip the tenant ID from the authorization URL + asURL, err := url.Parse(value) + if err != nil { + return bc, err + } + bc.values[tenantID] = asURL.Path[1:] + default: + bc.values[key] = value + } + } + } + + return bc, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index 6e076981f..332a8909d 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -17,12 +17,6 @@ const ( ) const ( - methodDelete = "DELETE" - methodPatch = "PATCH" - methodPost = "POST" - methodPut = "PUT" - methodGet = "GET" - operationInProgress string = "InProgress" operationCanceled string = "Canceled" operationFailed string = "Failed" @@ -226,7 +220,7 @@ func updatePollingState(resp *http.Response, ps *pollingState) error { // Lastly, requests against an existing resource, use the last request URI if ps.uri == "" { m := strings.ToUpper(req.Method) - if m == methodPatch || m == methodPut || m == methodGet { + if m == http.MethodPatch 
|| m == http.MethodPut || m == http.MethodGet { ps.uri = req.URL.String() } } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/config.go b/vendor/github.com/Azure/go-autorest/autorest/azure/config.go deleted file mode 100644 index bea30b0d6..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/config.go +++ /dev/null @@ -1,13 +0,0 @@ -package azure - -import ( - "net/url" -) - -// OAuthConfig represents the endpoints needed -// in OAuth operations -type OAuthConfig struct { - AuthorizeEndpoint url.URL - TokenEndpoint url.URL - DeviceCodeEndpoint url.URL -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 4701b4376..1cf55651f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -2,14 +2,9 @@ package azure import ( "fmt" - "net/url" "strings" ) -const ( - activeDirectoryAPIVersion = "1.0" -) - var environments = map[string]Environment{ "AZURECHINACLOUD": ChinaCloud, "AZUREGERMANCLOUD": GermanCloud, @@ -133,35 +128,3 @@ func EnvironmentFromName(name string) (Environment, error) { } return env, nil } - -// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls -func (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) { - return OAuthConfigForTenant(env.ActiveDirectoryEndpoint, tenantID) -} - -// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls for target cloud auth endpoint -func OAuthConfigForTenant(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - template := "%s/oauth2/%s?api-version=%s" - u, err := url.Parse(activeDirectoryEndpoint) - if err != nil { - return nil, err - } - authorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "authorize", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - tokenURL, err := u.Parse(fmt.Sprintf(template, tenantID, "token", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - deviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "devicecode", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - - return &OAuthConfig{ - AuthorizeEndpoint: *authorizeURL, - TokenEndpoint: *tokenURL, - DeviceCodeEndpoint: *deviceCodeURL, - }, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go index b5f94b5c3..5f1e72fbe 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -35,6 +35,7 @@ var ( statusCodesForRetry = []int{ http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 http.StatusInternalServerError, // 500 http.StatusBadGateway, // 502 http.StatusServiceUnavailable, // 503 diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 000000000..e085c77ee --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,109 @@ +package date + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" +) + +// unixEpoch is the moment in time that should be treated as timestamp 0. +var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + +// UnixTime marshals and unmarshals a time that is represented as the number +// of seconds (ignoring skip-seconds) since the Unix Epoch. 
+type UnixTime time.Time + +// Duration returns the time as a Duration since the UnixEpoch. +func (t UnixTime) Duration() time.Duration { + return time.Time(t).Sub(unixEpoch) +} + +// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. +func NewUnixTimeFromSeconds(seconds float64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) +} + +// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. +func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(nanoseconds)) +} + +// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. +func NewUnixTimeFromDuration(dur time.Duration) UnixTime { + return UnixTime(unixEpoch.Add(dur)) +} + +// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' +func UnixEpoch() time.Time { + return unixEpoch +} + +// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. +// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) +func (t UnixTime) MarshalJSON() ([]byte, error) { + buffer := &bytes.Buffer{} + enc := json.NewEncoder(buffer) + err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since +// midnight January 1st, 1970. +func (t *UnixTime) UnmarshalJSON(text []byte) error { + dec := json.NewDecoder(bytes.NewReader(text)) + + var secondsSinceEpoch float64 + if err := dec.Decode(&secondsSinceEpoch); err != nil { + return err + } + + *t = NewUnixTimeFromSeconds(secondsSinceEpoch) + + return nil +} + +// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. +func (t UnixTime) MarshalText() ([]byte, error) { + cast := time.Time(t) + return cast.MarshalText() +} + +// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. +func (t *UnixTime) UnmarshalText(raw []byte) error { + var unmarshaled time.Time + + if err := unmarshaled.UnmarshalText(raw); err != nil { + return err + } + + *t = UnixTime(unmarshaled) + return nil +} + +// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. +func (t UnixTime) MarshalBinary() ([]byte, error) { + buf := &bytes.Buffer{} + + payload := int64(t.Duration()) + + if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. 
+func (t *UnixTime) UnmarshalBinary(raw []byte) error { + var nanosecondsSinceEpoch int64 + + if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { + return err + } + *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go index 4bcb8f27b..aaef2ac8e 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/error.go +++ b/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -31,6 +31,9 @@ type DetailedError struct { // Service Error is the response body of failed API in bytes ServiceError []byte + + // Response is the response object that was returned during failure if applicable. + Response *http.Response } // NewError creates a new Error conforming object from the passed packageType, method, and @@ -67,6 +70,7 @@ func NewErrorWithError(original error, packageType string, method string, resp * Method: method, StatusCode: statusCode, Message: fmt.Sprintf(message, args...), + Response: resp, } } diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go index c9deb261a..afd114821 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -426,18 +426,3 @@ func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorato }) } } - -// Authorizer is the interface that provides a PrepareDecorator used to supply request -// authorization. Most often, the Authorizer decorator runs last so it has access to the full -// state of the formed HTTP request. -type Authorizer interface { - WithAuthorization() PrepareDecorator -} - -// NullAuthorizer implements a default, "do nothing" Authorizer. -type NullAuthorizer struct{} - -// WithAuthorization returns a PrepareDecorator that does nothing. -func (na NullAuthorizer) WithAuthorization() PrepareDecorator { - return WithNothing() -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go new file mode 100644 index 000000000..0ab1eb300 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go @@ -0,0 +1,38 @@ +package autorest + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. +func NewRetriableRequest(req *http.Request) *RetriableRequest { + return &RetriableRequest{req: req} +} + +// Request returns the wrapped HTTP request. 
+func (rr *RetriableRequest) Request() *http.Request { + return rr.req +} + +func (rr *RetriableRequest) prepareFromByteReader() (err error) { + // fall back to making a copy (only do this once) + b := []byte{} + if rr.req.ContentLength > 0 { + b = make([]byte, rr.req.ContentLength) + _, err = io.ReadFull(rr.req.Body, b) + if err != nil { + return err + } + } else { + b, err = ioutil.ReadAll(rr.req.Body) + if err != nil { + return err + } + } + rr.br = bytes.NewReader(b) + rr.req.Body = ioutil.NopCloser(rr.br) + return err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go new file mode 100644 index 000000000..e28eb2cbd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go @@ -0,0 +1,44 @@ +// +build !go1.8 + +package autorest + +import ( + "bytes" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + br *bytes.Reader + reset bool +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.reset { + if rr.br != nil { + _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) + } + rr.reset = false + if err != nil { + return err + } + } + if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + // indicates that the request body needs to be reset + rr.reset = true + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go new file mode 100644 index 000000000..8c1d1aec8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go @@ -0,0 +1,56 @@ +// +build go1.8 + +package autorest + +import ( + "bytes" + "io" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + rc io.ReadCloser + br *bytes.Reader + reset bool +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.reset { + if rr.rc != nil { + rr.req.Body = rr.rc + } else if rr.br != nil { + _, err = rr.br.Seek(0, io.SeekStart) + } + rr.reset = false + if err != nil { + return err + } + } + if rr.req.GetBody != nil { + // this will allow us to preserve the body without having to + // make a copy. 
note we need to do this on each iteration + rr.rc, err = rr.req.GetBody() + if err != nil { + return err + } + } else if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + // indicates that the request body needs to be reset + rr.reset = true + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.GetBody = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go index 9c0697815..94b029847 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -1,12 +1,11 @@ package autorest import ( - "bytes" "fmt" - "io/ioutil" "log" "math" "net/http" + "strconv" "time" ) @@ -175,8 +174,13 @@ func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ... func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { return func(s Sender) Sender { return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) for attempt := 0; attempt < attempts; attempt++ { - resp, err = s.Do(r) + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) if err == nil { return resp, err } @@ -194,29 +198,43 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { return func(s Sender) Sender { return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - b := []byte{} - if r.Body != nil { - b, err = ioutil.ReadAll(r.Body) - if err != nil { - return resp, err - } - } - + rr := NewRetriableRequest(r) // Increment to add the first call (attempts denotes number of retries) attempts++ for attempt := 0; attempt < attempts; attempt++ { - r.Body = ioutil.NopCloser(bytes.NewBuffer(b)) - resp, err = s.Do(r) + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) if err != nil || !ResponseHasStatusCode(resp, codes...) { return resp, err } - DelayForBackoff(backoff, attempt, r.Cancel) + delayed := DelayWithRetryAfter(resp, r.Cancel) + if !delayed { + DelayForBackoff(backoff, attempt, r.Cancel) + } } return resp, err }) } } +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in +// responses with status code 429 +func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { + retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) + if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { + select { + case <-time.After(time.Duration(retryAfter) * time.Second): + return true + case <-cancel: + return false + } + } + return false +} + // DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal // to or greater than the specified duration, exponentially backing off between requests using the // supplied backoff time.Duration (which may be zero). 
Retrying may be canceled by closing the @@ -224,9 +242,14 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { return func(s Sender) Sender { return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) end := time.Now().Add(d) for attempt := 0; time.Now().Before(end); attempt++ { - resp, err = s.Do(r) + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) if err == nil { return resp, err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index e325f4ce2..a222e8efa 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -1,29 +1,35 @@ package autorest import ( + "bytes" "fmt" "strings" "sync" ) const ( - major = 7 - minor = 3 - patch = 1 + major = 8 + minor = 0 + patch = 0 tag = "" ) -var versionLock sync.Once +var once sync.Once var version string // Version returns the semantic version (see http://semver.org). func Version() string { - versionLock.Do(func() { - version = fmt.Sprintf("v%d.%d.%d", major, minor, patch) - - if trimmed := strings.TrimPrefix(tag, "-"); trimmed != "" { - version = fmt.Sprintf("%s-%s", version, trimmed) + once.Do(func() { + semver := fmt.Sprintf("%d.%d.%d", major, minor, patch) + verBuilder := bytes.NewBufferString(semver) + if tag != "" && tag != "-" { + updated := strings.TrimPrefix(tag, "-") + _, err := verBuilder.WriteString("-" + updated) + if err == nil { + verBuilder = bytes.NewBufferString(semver) + } } + version = verBuilder.String() }) return version } diff --git a/vendor/github.com/JamesClonk/vultr/lib/account_info.go b/vendor/github.com/JamesClonk/vultr/lib/account_info.go index 8568461ee..f994f9242 100644 --- a/vendor/github.com/JamesClonk/vultr/lib/account_info.go +++ b/vendor/github.com/JamesClonk/vultr/lib/account_info.go @@ -35,19 +35,31 @@ func (a *AccountInfo) UnmarshalJSON(data []byte) (err error) { return err } - b, err := strconv.ParseFloat(fmt.Sprintf("%v", fields["balance"]), 64) + value := fmt.Sprintf("%v", fields["balance"]) + if len(value) == 0 || value == "" { + value = "0" + } + b, err := strconv.ParseFloat(value, 64) if err != nil { return err } a.Balance = b - pc, err := strconv.ParseFloat(fmt.Sprintf("%v", fields["pending_charges"]), 64) + value = fmt.Sprintf("%v", fields["pending_charges"]) + if len(value) == 0 || value == "" { + value = "0" + } + pc, err := strconv.ParseFloat(value, 64) if err != nil { return err } a.PendingCharges = pc - lpa, err := strconv.ParseFloat(fmt.Sprintf("%v", fields["last_payment_amount"]), 64) + value = fmt.Sprintf("%v", fields["last_payment_amount"]) + if len(value) == 0 || value == "" { + value = "0" + } + lpa, err := strconv.ParseFloat(value, 64) if err != nil { return err } diff --git a/vendor/github.com/JamesClonk/vultr/lib/client.go b/vendor/github.com/JamesClonk/vultr/lib/client.go index 61df58e08..0f805a10c 100644 --- a/vendor/github.com/JamesClonk/vultr/lib/client.go +++ b/vendor/github.com/JamesClonk/vultr/lib/client.go @@ -1,6 +1,7 @@ package lib import ( + "bytes" "crypto/tls" "encoding/json" "errors" @@ -168,8 +169,25 @@ func (c *Client) do(req *http.Request, data interface{}) error { // Throttle http requests to avoid hitting Vultr's API rate-limit c.bucket.Wait(1) + // Request body gets drained on each read so we + // need to save it's 
content for retrying requests
+	var err error
+	var requestBody []byte
+	if req.Body != nil {
+		requestBody, err = ioutil.ReadAll(req.Body)
+		if err != nil {
+			return fmt.Errorf("Error reading request body: %v", err)
+		}
+		req.Body.Close()
+	}
+
 	var apiError error
 	for tryCount := 1; tryCount <= c.MaxAttempts; tryCount++ {
+		// Restore request body to the original state
+		if requestBody != nil {
+			req.Body = ioutil.NopCloser(bytes.NewBuffer(requestBody))
+		}
+
 		resp, err := c.client.Do(req)
 		if err != nil {
 			return err
diff --git a/vendor/github.com/JamesClonk/vultr/lib/dns.go b/vendor/github.com/JamesClonk/vultr/lib/dns.go
index 77a86166f..e66275b68 100644
--- a/vendor/github.com/JamesClonk/vultr/lib/dns.go
+++ b/vendor/github.com/JamesClonk/vultr/lib/dns.go
@@ -72,7 +72,7 @@ func (c *Client) GetDNSRecords(domain string) (records []DNSRecord, err error) {
 func (c *Client) CreateDNSDomain(domain, serverIP string) error {
 	values := url.Values{
 		"domain":   {domain},
-		"serverIP": {serverIP},
+		"serverip": {serverIP},
 	}
 
 	if err := c.post(`dns/create_domain`, values, nil); err != nil {
diff --git a/vendor/github.com/JamesClonk/vultr/lib/firewall.go b/vendor/github.com/JamesClonk/vultr/lib/firewall.go
new file mode 100644
index 000000000..c9c54e9d5
--- /dev/null
+++ b/vendor/github.com/JamesClonk/vultr/lib/firewall.go
@@ -0,0 +1,248 @@
+package lib
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// FirewallGroup represents a firewall group on Vultr
+type FirewallGroup struct {
+	ID            string `json:"FIREWALLGROUPID"`
+	Description   string `json:"description"`
+	Created       string `json:"date_created"`
+	Modified      string `json:"date_modified"`
+	InstanceCount int    `json:"instance_count"`
+	RuleCount     int    `json:"rule_count"`
+	MaxRuleCount  int    `json:"max_rule_count"`
+}
+
+// FirewallRule represents a firewall rule on Vultr
+type FirewallRule struct {
+	RuleNumber int    `json:"rulenumber"`
+	Action     string `json:"action"`
+	Protocol   string `json:"protocol"`
+	Port       string `json:"port"`
+	Network    *net.IPNet
+}
+
+type firewallGroups []FirewallGroup
+
+func (f firewallGroups) Len() int      { return len(f) }
+func (f firewallGroups) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f firewallGroups) Less(i, j int) bool {
+	// sort order: description
+	return strings.ToLower(f[i].Description) < strings.ToLower(f[j].Description)
+}
+
+type firewallRules []FirewallRule
+
+func (r firewallRules) Len() int      { return len(r) }
+func (r firewallRules) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
+func (r firewallRules) Less(i, j int) bool {
+	// sort order: rule number
+	return r[i].RuleNumber < r[j].RuleNumber
+}
+
+// UnmarshalJSON implements json.Unmarshaller on FirewallRule.
+// This is needed because the Vultr API is inconsistent in its JSON responses.
+// Some fields can change type, from JSON number to JSON string and vice-versa.
+func (r *FirewallRule) UnmarshalJSON(data []byte) (err error) { + if r == nil { + *r = FirewallRule{} + } + + var fields map[string]interface{} + if err := json.Unmarshal(data, &fields); err != nil { + return err + } + + value := fmt.Sprintf("%v", fields["rulenumber"]) + if len(value) == 0 || value == "" { + value = "0" + } + number, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + r.RuleNumber = int(number) + + value = fmt.Sprintf("%v", fields["subnet_size"]) + if len(value) == 0 || value == "" { + value = "0" + } + subnetSize, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + + r.Action = fmt.Sprintf("%v", fields["action"]) + r.Protocol = fmt.Sprintf("%v", fields["protocol"]) + r.Port = fmt.Sprintf("%v", fields["port"]) + subnet := fmt.Sprintf("%v", fields["subnet"]) + + if subnetSize > 0 && len(subnet) > 0 { + _, r.Network, err = net.ParseCIDR(fmt.Sprintf("%s/%d", subnet, subnetSize)) + if err != nil { + return fmt.Errorf("Failed to parse subnet from Vultr API") + } + } else { + _, r.Network, _ = net.ParseCIDR("0.0.0.0/0") + } + + return +} + +// GetFirewallGroups returns a list of all available firewall groups on Vultr +func (c *Client) GetFirewallGroups() ([]FirewallGroup, error) { + var groupMap map[string]FirewallGroup + if err := c.get(`firewall/group_list`, &groupMap); err != nil { + return nil, err + } + + var groupList []FirewallGroup + for _, g := range groupMap { + groupList = append(groupList, g) + } + sort.Sort(firewallGroups(groupList)) + return groupList, nil +} + +// GetFirewallGroup returns the firewall group with given ID +func (c *Client) GetFirewallGroup(id string) (FirewallGroup, error) { + groups, err := c.GetFirewallGroups() + if err != nil { + return FirewallGroup{}, err + } + + for _, g := range groups { + if g.ID == id { + return g, nil + } + } + return FirewallGroup{}, fmt.Errorf("Firewall group with ID %v not found", id) +} + +// CreateFirewallGroup creates a new firewall group in Vultr account +func (c *Client) CreateFirewallGroup(description string) (string, error) { + values := url.Values{} + + // Optional description + if len(description) > 0 { + values.Add("description", description) + } + + var result FirewallGroup + err := c.post(`firewall/group_create`, values, &result) + if err != nil { + return "", err + } + return result.ID, nil +} + +// DeleteFirewallGroup deletes an existing firewall group +func (c *Client) DeleteFirewallGroup(groupID string) error { + values := url.Values{ + "FIREWALLGROUPID": {groupID}, + } + + if err := c.post(`firewall/group_delete`, values, nil); err != nil { + return err + } + return nil +} + +// SetFirewallGroupDescription sets the description of an existing firewall group +func (c *Client) SetFirewallGroupDescription(groupID, description string) error { + values := url.Values{ + "FIREWALLGROUPID": {groupID}, + "description": {description}, + } + + if err := c.post(`firewall/group_set_description`, values, nil); err != nil { + return err + } + return nil +} + +// GetFirewallRules returns a list of rules for the given firewall group +func (c *Client) GetFirewallRules(groupID string) ([]FirewallRule, error) { + var ruleMap map[string]FirewallRule + ipTypes := []string{"v4", "v6"} + for _, ipType := range ipTypes { + args := fmt.Sprintf("direction=in&FIREWALLGROUPID=%s&ip_type=%s", + groupID, ipType) + if err := c.get(`firewall/rule_list?`+args, &ruleMap); err != nil { + return nil, err + } + } + + var ruleList []FirewallRule + for _, r := range ruleMap { + ruleList = 
append(ruleList, r) + } + sort.Sort(firewallRules(ruleList)) + return ruleList, nil +} + +// CreateFirewallRule creates a new firewall rule in Vultr account. +// groupID is the ID of the firewall group to create the rule in +// protocol must be one of: "icmp", "tcp", "udp", "gre" +// port can be a port number or colon separated port range (TCP/UDP only) +func (c *Client) CreateFirewallRule(groupID, protocol, port string, + network *net.IPNet) (int, error) { + ip := network.IP.String() + maskBits, _ := network.Mask.Size() + if ip == "" { + return 0, fmt.Errorf("Invalid network") + } + + var ipType string + if network.IP.To4() != nil { + ipType = "v4" + } else { + ipType = "v6" + } + + values := url.Values{ + "FIREWALLGROUPID": {groupID}, + // possible values: "in" + "direction": {"in"}, + // possible values: "icmp", "tcp", "udp", "gre" + "protocol": {protocol}, + // possible values: "v4", "v6" + "ip_type": {ipType}, + // IP address representing a subnet + "subnet": {ip}, + // IP prefix size in bits + "subnet_size": {fmt.Sprintf("%v", maskBits)}, + } + + if len(port) > 0 { + values.Add("port", port) + } + + var result FirewallRule + err := c.post(`firewall/rule_create`, values, &result) + if err != nil { + return 0, err + } + return result.RuleNumber, nil +} + +// DeleteFirewallRule deletes an existing firewall rule +func (c *Client) DeleteFirewallRule(ruleNumber int, groupID string) error { + values := url.Values{ + "FIREWALLGROUPID": {groupID}, + "rulenumber": {fmt.Sprintf("%v", ruleNumber)}, + } + + if err := c.post(`firewall/rule_delete`, values, nil); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/JamesClonk/vultr/lib/ip.go b/vendor/github.com/JamesClonk/vultr/lib/ip.go index de21b1fed..4d169cb3f 100644 --- a/vendor/github.com/JamesClonk/vultr/lib/ip.go +++ b/vendor/github.com/JamesClonk/vultr/lib/ip.go @@ -1,6 +1,7 @@ package lib import ( + "fmt" "net/url" "sort" ) @@ -78,6 +79,32 @@ func (c *Client) ListIPv4(id string) (list []IPv4, err error) { return list, nil } +// CreateIPv4 creates an IPv4 address and attaches it to a virtual machine +func (c *Client) CreateIPv4(id string, reboot bool) error { + values := url.Values{ + "SUBID": {id}, + "reboot": {fmt.Sprintf("%t", reboot)}, + } + + if err := c.post(`server/create_ipv4`, values, nil); err != nil { + return err + } + return nil +} + +// DeleteIPv4 deletes an IPv4 address and detaches it from a virtual machine +func (c *Client) DeleteIPv4(id, ip string) error { + values := url.Values{ + "SUBID": {id}, + "ip": {ip}, + } + + if err := c.post(`server/destroy_ipv4`, values, nil); err != nil { + return err + } + return nil +} + // ListIPv6 lists the IPv4 information of a virtual machine func (c *Client) ListIPv6(id string) (list []IPv6, err error) { var ipMap map[string][]IPv6 diff --git a/vendor/github.com/JamesClonk/vultr/lib/servers.go b/vendor/github.com/JamesClonk/vultr/lib/servers.go index 862b441a6..418198a71 100644 --- a/vendor/github.com/JamesClonk/vultr/lib/servers.go +++ b/vendor/github.com/JamesClonk/vultr/lib/servers.go @@ -40,6 +40,7 @@ type Server struct { Tag string `json:"tag"` OSID string `json:"OSID"` AppID string `json:"APPID"` + FirewallGroupID string `json:"FIREWALLGROUPID"` } // ServerOptions are optional parameters to be used during server creation @@ -50,6 +51,7 @@ type ServerOptions struct { UserData string Snapshot string SSHKey string + ReservedIP string IPV6 bool PrivateNetworking bool AutoBackups bool @@ -57,6 +59,7 @@ type ServerOptions struct { Hostname string Tag string AppID 
string + FirewallGroupID string } type servers []Server @@ -166,11 +169,17 @@ func (s *Server) UnmarshalJSON(data []byte) (err error) { s.OSID = value value = fmt.Sprintf("%v", fields["APPID"]) - if value == "" { + if value == "" || value == "0" { value = "" } s.AppID = value + value = fmt.Sprintf("%v", fields["FIREWALLGROUPID"]) + if value == "" || value == "0" { + value = "" + } + s.FirewallGroupID = value + s.ID = fmt.Sprintf("%v", fields["SUBID"]) s.Name = fmt.Sprintf("%v", fields["label"]) s.OS = fmt.Sprintf("%v", fields["os"]) @@ -280,6 +289,10 @@ func (c *Client) CreateServer(name string, regionID, planID, osID int, options * values.Add("SSHKEYID", options.SSHKey) } + if options.ReservedIP != "" { + values.Add("reserved_ip_v4", options.ReservedIP) + } + values.Add("enable_ipv6", "no") if options.IPV6 { values.Set("enable_ipv6", "yes") @@ -311,6 +324,10 @@ func (c *Client) CreateServer(name string, regionID, planID, osID int, options * if options.AppID != "" { values.Add("APPID", options.AppID) } + + if options.FirewallGroupID != "" { + values.Add("FIREWALLGROUPID", options.FirewallGroupID) + } } var server Server @@ -337,6 +354,19 @@ func (c *Client) RenameServer(id, name string) error { return nil } +// TagServer replaces the tag on an existing virtual machine +func (c *Client) TagServer(id, tag string) error { + values := url.Values{ + "SUBID": {id}, + "tag": {tag}, + } + + if err := c.post(`server/tag_set`, values, nil); err != nil { + return err + } + return nil +} + // StartServer starts an existing virtual machine func (c *Client) StartServer(id string) error { values := url.Values{ @@ -457,6 +487,24 @@ func (c *Client) DeleteServer(id string) error { return nil } +// SetFirewallGroup adds a virtual machine to a firewall group +func (c *Client) SetFirewallGroup(id, firewallgroup string) error { + values := url.Values{ + "SUBID": {id}, + "FIREWALLGROUPID": {firewallgroup}, + } + + if err := c.post(`server/firewall_group_set`, values, nil); err != nil { + return err + } + return nil +} + +// UnsetFirewallGroup removes a virtual machine from a firewall group +func (c *Client) UnsetFirewallGroup(id string) error { + return c.SetFirewallGroup(id, "0") +} + // BandwidthOfServer retrieves the bandwidth used by a virtual machine func (c *Client) BandwidthOfServer(id string) (bandwidth []map[string]string, err error) { var bandwidthMap map[string][][]string diff --git a/vendor/github.com/decker502/dnspod-go/domains.go b/vendor/github.com/decker502/dnspod-go/domains.go index d287dee58..8cd524639 100644 --- a/vendor/github.com/decker502/dnspod-go/domains.go +++ b/vendor/github.com/decker502/dnspod-go/domains.go @@ -15,18 +15,18 @@ type DomainsService struct { } type DomainInfo struct { - DomainTotal int `json:"domain_total,omitempty"` - AllTotal int `json:"all_total,omitempty"` - MineTotal int `json:"mine_total,omitempty"` - ShareTotal int `json:"share_total,omitempty"` - VipTotal int `json:"vip_total,omitempty"` - IsMarkTotal int `json:"ismark_total,omitempty"` - PauseTotal int `json:"pause_total,omitempty"` - ErrorTotal int `json:"error_total,omitempty"` - LockTotal int `json:"lock_total,omitempty"` - SpamTotal int `json:"spam_total,omitempty"` - VipExpire int `json:"vip_expire,omitempty"` - ShareOutTotal int `json:"share_out_total,omitempty"` + DomainTotal int `json:"domain_total,omitempty"` + AllTotal int `json:"all_total,omitempty"` + MineTotal int `json:"mine_total,omitempty"` + ShareTotal string `json:"share_total,omitempty"` + VipTotal int `json:"vip_total,omitempty"` + 
IsMarkTotal int `json:"ismark_total,omitempty"` + PauseTotal int `json:"pause_total,omitempty"` + ErrorTotal int `json:"error_total,omitempty"` + LockTotal int `json:"lock_total,omitempty"` + SpamTotal int `json:"spam_total,omitempty"` + VipExpire int `json:"vip_expire,omitempty"` + ShareOutTotal int `json:"share_out_total,omitempty"` } type Domain struct { diff --git a/vendor/github.com/edeckers/auroradnsclient/LICENSE b/vendor/github.com/edeckers/auroradnsclient/LICENSE index 9cecc1d46..a612ad981 100644 --- a/vendor/github.com/edeckers/auroradnsclient/LICENSE +++ b/vendor/github.com/edeckers/auroradnsclient/LICENSE @@ -1,674 +1,373 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. 
If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. 
A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. 
- - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - {one line to give the program's name and a brief idea of what it does.} - Copyright (C) {year} {name of author} - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - {project} Copyright (C) {year} {fullname} - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/pyr/egoscale/LICENSE b/vendor/github.com/exoscale/egoscale/LICENSE similarity index 100% rename from vendor/github.com/pyr/egoscale/LICENSE rename to vendor/github.com/exoscale/egoscale/LICENSE diff --git a/vendor/github.com/pyr/egoscale/src/egoscale/affinitygroup.go b/vendor/github.com/exoscale/egoscale/affinitygroup.go similarity index 100% rename from vendor/github.com/pyr/egoscale/src/egoscale/affinitygroup.go rename to vendor/github.com/exoscale/egoscale/affinitygroup.go diff --git a/vendor/github.com/pyr/egoscale/src/egoscale/async.go b/vendor/github.com/exoscale/egoscale/async.go similarity index 100% rename from vendor/github.com/pyr/egoscale/src/egoscale/async.go rename to vendor/github.com/exoscale/egoscale/async.go diff --git a/vendor/github.com/pyr/egoscale/src/egoscale/dns.go b/vendor/github.com/exoscale/egoscale/dns.go similarity index 60% rename from vendor/github.com/pyr/egoscale/src/egoscale/dns.go rename to vendor/github.com/exoscale/egoscale/dns.go index e3da06d77..3a341bf92 100644 --- a/vendor/github.com/pyr/egoscale/src/egoscale/dns.go +++ b/vendor/github.com/exoscale/egoscale/dns.go @@ -10,9 +10,10 @@ func (exo *Client) CreateDomain(name string) (*DNSDomain, error) { var hdr = make(http.Header) var domain DNSDomainCreateRequest - hdr.Add("X-DNS-TOKEN", exo.apiKey + ":" + exo.apiSecret) + hdr.Add("X-DNS-TOKEN", exo.apiKey+":"+exo.apiSecret) domain.Domain.Name = name - m, err := json.Marshal(domain); if err != nil { + m, err := json.Marshal(domain) + if err != nil { return nil, err } @@ -32,10 +33,10 @@ func (exo *Client) CreateDomain(name string) (*DNSDomain, error) { func (exo *Client) GetDomain(name string) (*DNSDomain, error) { var hdr = make(http.Header) - hdr.Add("X-DNS-TOKEN", exo.apiKey + ":" + exo.apiSecret) + hdr.Add("X-DNS-TOKEN", exo.apiKey+":"+exo.apiSecret) hdr.Add("Accept", "application/json") - resp, err := exo.DetailedRequest("/v1/domains/" + name, "", "GET", hdr) + resp, err := exo.DetailedRequest("/v1/domains/"+name, "", "GET", hdr) if err != nil { return 
nil, err } @@ -48,12 +49,12 @@ func (exo *Client) GetDomain(name string) (*DNSDomain, error) { return d, nil } -func (exo *Client) DeleteDomain(name string) (error) { +func (exo *Client) DeleteDomain(name string) error { var hdr = make(http.Header) - hdr.Add("X-DNS-TOKEN", exo.apiKey + ":" + exo.apiSecret) + hdr.Add("X-DNS-TOKEN", exo.apiKey+":"+exo.apiSecret) hdr.Add("Accept", "application/json") - _, err := exo.DetailedRequest("/v1/domains/" + name, "", "DELETE", hdr) + _, err := exo.DetailedRequest("/v1/domains/"+name, "", "DELETE", hdr) if err != nil { return err } @@ -63,10 +64,10 @@ func (exo *Client) DeleteDomain(name string) (error) { func (exo *Client) GetRecords(name string) ([]*DNSRecordResponse, error) { var hdr = make(http.Header) - hdr.Add("X-DNS-TOKEN", exo.apiKey + ":" + exo.apiSecret) + hdr.Add("X-DNS-TOKEN", exo.apiKey+":"+exo.apiSecret) hdr.Add("Accept", "application/json") - resp, err := exo.DetailedRequest("/v1/domains/" + name + "/records", "", "GET", hdr) + resp, err := exo.DetailedRequest("/v1/domains/"+name+"/records", "", "GET", hdr) if err != nil { return nil, err } @@ -79,20 +80,21 @@ func (exo *Client) GetRecords(name string) ([]*DNSRecordResponse, error) { return r, nil } -func(exo *Client) CreateRecord(name string, rec DNSRecord) (*DNSRecordResponse, error) { +func (exo *Client) CreateRecord(name string, rec DNSRecord) (*DNSRecordResponse, error) { var hdr = make(http.Header) - hdr.Add("X-DNS-TOKEN", exo.apiKey + ":" + exo.apiSecret) + hdr.Add("X-DNS-TOKEN", exo.apiKey+":"+exo.apiSecret) hdr.Add("Accept", "application/json") hdr.Add("Content-Type", "application/json") var rr DNSRecordResponse rr.Record = rec - body, err := json.Marshal(rr); if err != nil { + body, err := json.Marshal(rr) + if err != nil { return nil, err } - resp, err := exo.DetailedRequest("/v1/domains/" + name + "/records", string(body), "POST", hdr) + resp, err := exo.DetailedRequest("/v1/domains/"+name+"/records", string(body), "POST", hdr) if err != nil { return nil, err } @@ -102,24 +104,25 @@ func(exo *Client) CreateRecord(name string, rec DNSRecord) (*DNSRecordResponse, return nil, err } - return r, nil + return r, nil } -func(exo *Client) UpdateRecord(name string, rec DNSRecord) (*DNSRecordResponse, error) { +func (exo *Client) UpdateRecord(name string, rec DNSRecord) (*DNSRecordResponse, error) { var hdr = make(http.Header) id := strconv.FormatInt(rec.Id, 10) - hdr.Add("X-DNS-TOKEN", exo.apiKey + ":" + exo.apiSecret) + hdr.Add("X-DNS-TOKEN", exo.apiKey+":"+exo.apiSecret) hdr.Add("Accept", "application/json") hdr.Add("Content-Type", "application/json") var rr DNSRecordResponse rr.Record = rec - body, err := json.Marshal(rr); if err != nil { + body, err := json.Marshal(rr) + if err != nil { return nil, err } - resp, err := exo.DetailedRequest("/v1/domains/" + name + "/records/" + id, + resp, err := exo.DetailedRequest("/v1/domains/"+name+"/records/"+id, string(body), "PUT", hdr) if err != nil { return nil, err @@ -133,18 +136,18 @@ func(exo *Client) UpdateRecord(name string, rec DNSRecord) (*DNSRecordResponse, return r, nil } -func(exo *Client) DeleteRecord(name string, rec DNSRecord) (error) { +func (exo *Client) DeleteRecord(name string, rec DNSRecord) error { var hdr = make(http.Header) id := strconv.FormatInt(rec.Id, 10) - hdr.Add("X-DNS-TOKEN", exo.apiKey + ":" + exo.apiSecret) + hdr.Add("X-DNS-TOKEN", exo.apiKey+":"+exo.apiSecret) hdr.Add("Accept", "application/json") hdr.Add("Content-Type", "application/json") - _, err := exo.DetailedRequest("/v1/domains/" + name + "/records/" 
+ id, + _, err := exo.DetailedRequest("/v1/domains/"+name+"/records/"+id, "", "DELETE", hdr) if err != nil { return err } return nil -} \ No newline at end of file +} diff --git a/vendor/github.com/pyr/egoscale/src/egoscale/error.go b/vendor/github.com/exoscale/egoscale/error.go similarity index 100% rename from vendor/github.com/pyr/egoscale/src/egoscale/error.go rename to vendor/github.com/exoscale/egoscale/error.go diff --git a/vendor/github.com/pyr/egoscale/src/egoscale/groups.go b/vendor/github.com/exoscale/egoscale/groups.go similarity index 69% rename from vendor/github.com/pyr/egoscale/src/egoscale/groups.go rename to vendor/github.com/exoscale/egoscale/groups.go index f51eb82fe..278277264 100644 --- a/vendor/github.com/pyr/egoscale/src/egoscale/groups.go +++ b/vendor/github.com/exoscale/egoscale/groups.go @@ -10,21 +10,21 @@ func (exo *Client) CreateEgressRule(rule SecurityGroupRule) (*AuthorizeSecurityG params := url.Values{} params.Set("securitygroupid", rule.SecurityGroupId) - - if rule.Cidr != "" { - params.Set("cidrlist", rule.Cidr) - } else if len(rule.UserSecurityGroupList) > 0 { - usg,err := json.Marshal(rule.UserSecurityGroupList) - if err != nil { - return nil, err - } - params.Set("usersecuritygrouplist", string(usg)) - } else { - return nil, fmt.Errorf("No Egress rule CIDR or Security Group List provided") - } + + if rule.Cidr != "" { + params.Set("cidrlist", rule.Cidr) + } else if len(rule.UserSecurityGroupList) > 0 { + usg, err := json.Marshal(rule.UserSecurityGroupList) + if err != nil { + return nil, err + } + params.Set("usersecuritygrouplist", string(usg)) + } else { + return nil, fmt.Errorf("No Egress rule CIDR or Security Group List provided") + } params.Set("protocol", rule.Protocol) - + if rule.Protocol == "ICMP" { params.Set("icmpcode", fmt.Sprintf("%d", rule.IcmpCode)) params.Set("icmptype", fmt.Sprintf("%d", rule.IcmpType)) @@ -52,18 +52,18 @@ func (exo *Client) CreateIngressRule(rule SecurityGroupRule) (*AuthorizeSecurity params := url.Values{} params.Set("securitygroupid", rule.SecurityGroupId) - - if rule.Cidr != "" { - params.Set("cidrlist", rule.Cidr) - } else if len(rule.UserSecurityGroupList) >0 { - for i := 0; i < len(rule.UserSecurityGroupList); i++ { - params.Set(fmt.Sprintf("usersecuritygrouplist[%d].account", i), rule.UserSecurityGroupList[i].Account) - params.Set(fmt.Sprintf("usersecuritygrouplist[%d].group", i), rule.UserSecurityGroupList[i].Group) - - } - } else { - return nil, fmt.Errorf("No Ingress rule CIDR or Security Group List provided") - } + + if rule.Cidr != "" { + params.Set("cidrlist", rule.Cidr) + } else if len(rule.UserSecurityGroupList) > 0 { + for i := 0; i < len(rule.UserSecurityGroupList); i++ { + params.Set(fmt.Sprintf("usersecuritygrouplist[%d].account", i), rule.UserSecurityGroupList[i].Account) + params.Set(fmt.Sprintf("usersecuritygrouplist[%d].group", i), rule.UserSecurityGroupList[i].Group) + + } + } else { + return nil, fmt.Errorf("No Ingress rule CIDR or Security Group List provided") + } params.Set("protocol", rule.Protocol) @@ -77,7 +77,7 @@ func (exo *Client) CreateIngressRule(rule SecurityGroupRule) (*AuthorizeSecurity return nil, fmt.Errorf("Invalid Egress rule Protocol: %s", rule.Protocol) } - fmt.Printf("## params: %+v\n", params) + fmt.Printf("## params: %+v\n", params) resp, err := exo.Request("authorizeSecurityGroupIngress", params) @@ -130,11 +130,12 @@ func (exo *Client) CreateSecurityGroupWithRules(name string, ingress []SecurityG return &r.Wrapped, nil } -func (exo *Client) DeleteSecurityGroup(name 
string) (error) { +func (exo *Client) DeleteSecurityGroup(name string) error { params := url.Values{} params.Set("name", name) - resp, err := exo.Request("deleteSecurityGroup", params); if err != nil { + resp, err := exo.Request("deleteSecurityGroup", params) + if err != nil { return err } diff --git a/vendor/github.com/pyr/egoscale/src/egoscale/init.go b/vendor/github.com/exoscale/egoscale/init.go similarity index 100% rename from vendor/github.com/pyr/egoscale/src/egoscale/init.go rename to vendor/github.com/exoscale/egoscale/init.go diff --git a/vendor/github.com/pyr/egoscale/src/egoscale/ip.go b/vendor/github.com/exoscale/egoscale/ip.go similarity index 100% rename from vendor/github.com/pyr/egoscale/src/egoscale/ip.go rename to vendor/github.com/exoscale/egoscale/ip.go diff --git a/vendor/github.com/pyr/egoscale/src/egoscale/keypair.go b/vendor/github.com/exoscale/egoscale/keypair.go similarity index 100% rename from vendor/github.com/pyr/egoscale/src/egoscale/keypair.go rename to vendor/github.com/exoscale/egoscale/keypair.go diff --git a/vendor/github.com/pyr/egoscale/src/egoscale/request.go b/vendor/github.com/exoscale/egoscale/request.go similarity index 95% rename from vendor/github.com/pyr/egoscale/src/egoscale/request.go rename to vendor/github.com/exoscale/egoscale/request.go index 8de806b79..fe08da754 100644 --- a/vendor/github.com/pyr/egoscale/src/egoscale/request.go +++ b/vendor/github.com/exoscale/egoscale/request.go @@ -9,8 +9,8 @@ import ( "io/ioutil" "net/http" "net/url" - "strings" "sort" + "strings" ) func csQuotePlus(s string) string { @@ -89,11 +89,11 @@ func (exo *Client) Request(command string, params url.Values) (json.RawMessage, params.Set("command", command) params.Set("response", "json") - for k, _ := range(params) { + for k, _ := range params { keys = append(keys, k) } sort.Strings(keys) - for _, k := range(keys) { + for _, k := range keys { arg := k + "=" + csEncode(params[k][0]) unencoded = append(unencoded, arg) } @@ -113,17 +113,18 @@ func (exo *Client) Request(command string, params url.Values) (json.RawMessage, return exo.ParseResponse(resp) } - func (exo *Client) DetailedRequest(uri string, params string, method string, header http.Header) (json.RawMessage, error) { url := exo.endpoint + uri - req, err := http.NewRequest(method, url, strings.NewReader(params)); if err != nil { + req, err := http.NewRequest(method, url, strings.NewReader(params)) + if err != nil { return nil, err } req.Header = header - response, err := exo.client.Do(req); if err != nil { + response, err := exo.client.Do(req) + if err != nil { return nil, err } diff --git a/vendor/github.com/pyr/egoscale/src/egoscale/topology.go b/vendor/github.com/exoscale/egoscale/topology.go similarity index 95% rename from vendor/github.com/pyr/egoscale/src/egoscale/topology.go rename to vendor/github.com/exoscale/egoscale/topology.go index 7634382a4..ad004dfe3 100644 --- a/vendor/github.com/pyr/egoscale/src/egoscale/topology.go +++ b/vendor/github.com/exoscale/egoscale/topology.go @@ -31,12 +31,14 @@ func (exo *Client) GetSecurityGroups() (map[string]SecurityGroup, error) { func (exo *Client) GetSecurityGroupId(name string) (string, error) { params := url.Values{} - resp, err := exo.Request("listSecurityGroups", params); if err != nil { + resp, err := exo.Request("listSecurityGroups", params) + if err != nil { return "", err } var r ListSecurityGroupsResponse - err = json.Unmarshal(resp, &r); if err != nil { + err = json.Unmarshal(resp, &r) + if err != nil { return "", err } @@ -142,7 +144,7 @@ 
func (exo *Client) GetImages() (map[string]map[int]string, error) { images = make(map[string]map[int]string) params := url.Values{} - params.Set("templatefilter", "executable") + params.Set("templatefilter", "featured") resp, err := exo.Request("listTemplates", params) @@ -191,7 +193,7 @@ func (exo *Client) GetTopology() (*Topology, error) { return nil, err } groups := make(map[string]string) - for k,v := range securityGroups { + for k, v := range securityGroups { groups[k] = v.Id } diff --git a/vendor/github.com/pyr/egoscale/src/egoscale/types.go b/vendor/github.com/exoscale/egoscale/types.go similarity index 91% rename from vendor/github.com/pyr/egoscale/src/egoscale/types.go rename to vendor/github.com/exoscale/egoscale/types.go index 2f918a057..0ba51b0b7 100644 --- a/vendor/github.com/pyr/egoscale/src/egoscale/types.go +++ b/vendor/github.com/exoscale/egoscale/types.go @@ -33,18 +33,18 @@ type Topology struct { } type SecurityGroupRule struct { - Cidr string - IcmpType int - IcmpCode int - Port int - Protocol string - SecurityGroupId string - UserSecurityGroupList []UserSecurityGroup `json:"usersecuritygrouplist,omitempty"` + Cidr string + IcmpType int + IcmpCode int + Port int + Protocol string + SecurityGroupId string + UserSecurityGroupList []UserSecurityGroup `json:"usersecuritygrouplist,omitempty"` } type UserSecurityGroup struct { - Group string `json:"group,omitempty"` - Account string `json:"account,omitempty"` + Group string `json:"group,omitempty"` + Account string `json:"account,omitempty"` } type MachineProfile struct { @@ -390,39 +390,39 @@ type VirtualMachine struct { Networkkbsread int64 `json:"networkkbsread,omitempty"` Networkkbswrite int64 `json:"networkkbswrite,omitempty"` Nic []struct { - Broadcasturi string `json:"broadcasturi,omitempty"` - Gateway string `json:"gateway,omitempty"` - Id string `json:"id,omitempty"` - Ip6address string `json:"ip6address,omitempty"` - Ip6cidr string `json:"ip6cidr,omitempty"` - Ip6gateway string `json:"ip6gateway,omitempty"` - Ipaddress string `json:"ipaddress,omitempty"` - Isdefault bool `json:"isdefault,omitempty"` - Isolationuri string `json:"isolationuri,omitempty"` - Macaddress string `json:"macaddress,omitempty"` - Netmask string `json:"netmask,omitempty"` - Networkid string `json:"networkid,omitempty"` - Networkname string `json:"networkname,omitempty"` + Broadcasturi string `json:"broadcasturi,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` Secondaryip []struct { - Id string `json:"id,omitempty"` - IpAddress string `json:"ipaddress,omitempty"` + Id string `json:"id,omitempty"` + IpAddress string `json:"ipaddress,omitempty"` } `json:"secondaryip,omitempty"` - Traffictype string `json:"traffictype,omitempty"` - Type string `json:"type,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` } `json:"nic,omitempty"` - Password string `json:"password,omitempty"` - Passwordenabled bool `json:"passwordenabled,omitempty"` - Project string `json:"project,omitempty"` - 
Projectid string `json:"projectid,omitempty"` - Publicip string `json:"publicip,omitempty"` - Publicipid string `json:"publicipid,omitempty"` - Rootdeviceid int64 `json:"rootdeviceid,omitempty"` - Rootdevicetype string `json:"rootdevicetype,omitempty"` - SecurityGroups []struct { - Account string `json:"account,omitempty"` - Description string `json:"description,omitempty"` - Id string `json:"id,omitempty"` - Name string `json:"name,omitemtpy"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + SecurityGroups []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitemtpy"` Tags []string `json:"tags,omitempty"` } `json:"securitygroup,omitempty"` Serviceofferingid string `json:"serviceofferingid,omitempty"` @@ -465,11 +465,11 @@ type RemoveIpFromNicResponse struct { } type AddIpToNicResponse struct { - Id string `json:"id"` + Id string `json:"id"` IpAddress string `json:"ipaddress"` NetworkId string `json:"networkid"` - NicId string `json:"nicid"` - VmId string `json:"virtualmachineid"` + NicId string `json:"nicid"` + VmId string `json:"virtualmachineid"` } type CreateAffinityGroupResponse struct { diff --git a/vendor/github.com/pyr/egoscale/src/egoscale/vm.go b/vendor/github.com/exoscale/egoscale/vm.go similarity index 97% rename from vendor/github.com/pyr/egoscale/src/egoscale/vm.go rename to vendor/github.com/exoscale/egoscale/vm.go index 9668929c7..659cbce8a 100644 --- a/vendor/github.com/pyr/egoscale/src/egoscale/vm.go +++ b/vendor/github.com/exoscale/egoscale/vm.go @@ -145,7 +145,7 @@ func (exo *Client) GetVirtualMachine(id string) (*VirtualMachine, error) { func (exo *Client) ListVirtualMachines() ([]*VirtualMachine, error) { - resp, err := exo.Request("listVirtualMachines", url.Values{}) + resp, err := exo.Request("listVirtualMachines", url.Values{}) if err != nil { return nil, err } diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE index 37ec93a14..d361bbcdf 100644 --- a/vendor/github.com/go-ini/ini/LICENSE +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -176,7 +176,7 @@ recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2014 Unknwon Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go new file mode 100644 index 000000000..1ba73ac80 --- /dev/null +++ b/vendor/github.com/go-ini/ini/file.go @@ -0,0 +1,391 @@ +// Copyright 2017 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" +) + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + options LoadOptions + dataSources []dataSource + + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + lock sync.RWMutex + + // To keep data in order. + sectionList []string + // Actual data is stored here. + sections map[string]*Section + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + options: opts, + } +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } else if f.options.Insensitive && name != DEFAULT_SECTION { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } else if f.options.Insensitive { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("section '%s' does not exist", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. 
+func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } else { + sec.Comment = sec.Comment[:1] + " " + strings.TrimSpace(sec.Comment[1:]) + } + if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil { + return nil, err + } + } + + if i > 0 || DefaultHeader { + if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return nil, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + if sec.isRawSection { + if _, err := buf.WriteString(sec.rawBody); err != nil { + return nil, err + } + + if PrettySection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. 
+ alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.ContainsAny(kname, "\"=:") { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KEY_LIST: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } else { + key.Comment = key.Comment[:1] + " " + strings.TrimSpace(key.Comment[1:]) + } + if _, err := buf.WriteString(key.Comment + LineBreak); err != nil { + return nil, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.ContainsAny(kname, "\"=:"): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + for _, val := range key.ValueWithShadows() { + if _, err := buf.WriteString(kname); err != nil { + return nil, err + } + + if key.isBooleanType { + if kname != sec.keyList[len(sec.keyList)-1] { + buf.WriteString(LineBreak) + } + continue KEY_LIST + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return nil, err + } + } + } + + if PrettySection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + } + + return buf, nil +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { + buf, err := f.writeToBuffer(indent) + if err != nil { + return 0, err + } + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + buf, err := f.writeToBuffer(indent) + if err != nil { + return err + } + + return ioutil.WriteFile(filename, buf.Bytes(), 0666) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 5211d5abc..135f0689e 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -17,17 +17,12 @@ package ini import ( "bytes" - "errors" "fmt" "io" "io/ioutil" "os" "regexp" "runtime" - "strconv" - "strings" - "sync" - "time" ) const ( @@ -37,7 +32,7 @@ const ( // Maximum allowed depth when recursively substituing variable names. 
_DEPTH_VALUES = 99 - _VERSION = "1.27.0" + _VERSION = "1.30.2" ) // Version returns current package version literal. @@ -60,6 +55,9 @@ var ( // Explicitly write DEFAULT section header DefaultHeader = false + + // Indicate whether to put a line between sections + PrettySection = true ) func init() { @@ -91,18 +89,6 @@ func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { return os.Open(s.name) } -type bytesReadCloser struct { - reader io.Reader -} - -func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { - return rc.reader.Read(p) -} - -func (rc *bytesReadCloser) Close() error { - return nil -} - // sourceData represents an object that contains content in memory. type sourceData struct { data []byte @@ -121,38 +107,6 @@ func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { return s.reader, nil } -// File represents a combination of a or more INI file(s) in memory. -type File struct { - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - // Make sure data is safe in multiple goroutines. - lock sync.RWMutex - - // Allow combination of multiple data sources. - dataSources []dataSource - // Actual data is stored here. - sections map[string]*Section - - // To keep data in order. - sectionList []string - - options LoadOptions - - NameMapper - ValueMapper -} - -// newFile initializes File object with given data sources. -func newFile(dataSources []dataSource, opts LoadOptions) *File { - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string]*Section), - sectionList: make([]string, 0, 10), - options: opts, - } -} - func parseDataSource(source interface{}) (dataSource, error) { switch s := source.(type) { case string: @@ -180,6 +134,8 @@ type LoadOptions struct { AllowBooleanKeys bool // AllowShadows indicates whether to keep track of keys with same name under same section. AllowShadows bool + // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" + UnescapeValueDoubleQuotes bool // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise // conform to key/value pairs. Specify the names of those blocks here. UnparseableSections []string @@ -228,329 +184,3 @@ func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { return LoadSources(LoadOptions{AllowShadows: true}, source, others...) } - -// Empty returns an empty file object. -func Empty() *File { - // Ignore error here, we sure our data is good. - f, _ := Load([]byte("")) - return f -} - -// NewSection creates a new section. -func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("error creating new section: empty section name") - } else if f.options.Insensitive && name != DEFAULT_SECTION { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if inSlice(name, f.sectionList) { - return f.sections[name], nil - } - - f.sectionList = append(f.sectionList, name) - f.sections[name] = newSection(f, name) - return f.sections[name], nil -} - -// NewRawSection creates a new section with an unparseable body. 
-func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - -// NewSections creates a list of sections. -func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. -func (f *File) GetSection(name string) (*Section, error) { - if len(name) == 0 { - name = DEFAULT_SECTION - } else if f.options.Insensitive { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sec := f.sections[name] - if sec == nil { - return nil, fmt.Errorf("section '%s' does not exist", name) - } - return sec, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - // Note: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// Section returns list of Section. -func (f *File) Sections() []*Section { - sections := make([]*Section, len(f.sectionList)) - for i := range f.sectionList { - sections[i] = f.Section(f.sectionList[i]) - } - return sections -} - -// ChildSections returns a list of child sections of given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - -// SectionStrings returns list of section names. -func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section. -func (f *File) DeleteSection(name string) { - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if len(name) == 0 { - name = DEFAULT_SECTION - } - - for i, s := range f.sectionList { - if s == name { - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) - delete(f.sections, name) - return - } - } -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. -func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - f.parse(bytes.NewBuffer(nil)) - continue - } - return err - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. -func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -// WriteToIndent writes content into io.Writer with given indention. -// If PrettyFormat has been set to be true, -// it will align "=" sign with spaces under each section. 
-func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { - equalSign := "=" - if PrettyFormat { - equalSign = " = " - } - - // Use buffer to make sure target is safe until finish encoding. - buf := bytes.NewBuffer(nil) - for i, sname := range f.sectionList { - sec := f.Section(sname) - if len(sec.Comment) > 0 { - if sec.Comment[0] != '#' && sec.Comment[0] != ';' { - sec.Comment = "; " + sec.Comment - } - if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { - return 0, err - } - } - - if i > 0 || DefaultHeader { - if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return 0, err - } - } else { - // Write nothing if default section is empty - if len(sec.keyList) == 0 { - continue - } - } - - if sec.isRawSection { - if _, err = buf.WriteString(sec.rawBody); err != nil { - return 0, err - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modifed if they contain certain characters so - // we need to take that into account in our calculation. - alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.ContainsAny(kname, "\"=:") { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KEY_LIST: - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - if key.Comment[0] != '#' && key.Comment[0] != ';' { - key.Comment = "; " + key.Comment - } - if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { - return 0, err - } - } - - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncrement: - kname = "-" - case strings.ContainsAny(kname, "\"=:"): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): - kname = `"""` + kname + `"""` - } - - for _, val := range key.ValueWithShadows() { - if _, err = buf.WriteString(kname); err != nil { - return 0, err - } - - if key.isBooleanType { - if kname != sec.keyList[len(sec.keyList)-1] { - buf.WriteString(LineBreak) - } - continue KEY_LIST - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } - if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { - return 0, err - } - } - } - - // Put a line between sections - if _, err = buf.WriteString(LineBreak); err != nil { - return 0, err - } - } - - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to file system with given value indention. -func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename afte done. - tmpPath := filename + "." 
+ strconv.Itoa(time.Now().Nanosecond()) + ".tmp" - defer os.Remove(tmpPath) - - fw, err := os.Create(tmpPath) - if err != nil { - return err - } - - if _, err = f.WriteToIndent(fw, indent); err != nil { - fw.Close() - return err - } - fw.Close() - - // Remove old file and rename the new one. - os.Remove(filename) - return os.Rename(tmpPath, filename) -} - -// SaveTo writes content to file system. -func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go index 838356af0..ab566c2c1 100644 --- a/vendor/github.com/go-ini/ini/key.go +++ b/vendor/github.com/go-ini/ini/key.go @@ -15,6 +15,7 @@ package ini import ( + "bytes" "errors" "fmt" "strconv" @@ -25,6 +26,7 @@ import ( // Key represents a key under a section. type Key struct { s *Section + Comment string name string value string isAutoIncrement bool @@ -32,8 +34,6 @@ type Key struct { isShadow bool shadows []*Key - - Comment string } // newKey simply return a key object with given values. @@ -444,11 +444,39 @@ func (k *Key) Strings(delim string) []string { return []string{} } - vals := strings.Split(str, delim) - for i := range vals { - // vals[i] = k.transformValue(strings.TrimSpace(vals[i])) - vals[i] = strings.TrimSpace(vals[i]) + runes := []rune(str) + vals := make([]string, 0, 2) + var buf bytes.Buffer + escape := false + idx := 0 + for { + if escape { + escape = false + if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { + buf.WriteRune('\\') + } + buf.WriteRune(runes[idx]) + } else { + if runes[idx] == '\\' { + escape = true + } else if strings.HasPrefix(string(runes[idx:]), delim) { + idx += len(delim) - 1 + vals = append(vals, strings.TrimSpace(buf.String())) + buf.Reset() + } else { + buf.WriteRune(runes[idx]) + } + } + idx += 1 + if idx == len(runes) { + break + } } + + if buf.Len() > 0 { + vals = append(vals, strings.TrimSpace(buf.String())) + } + return vals } diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go index 6c0b10745..861e366e8 100644 --- a/vendor/github.com/go-ini/ini/parser.go +++ b/vendor/github.com/go-ini/ini/parser.go @@ -189,11 +189,11 @@ func (p *parser) readContinuationLines(val string) (string, error) { // are quotes \" or \'. // It returns false if any other parts also contain same kind of quotes. 
func hasSurroundedQuote(in string, quote byte) bool { - return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && strings.IndexByte(in[1:], quote) == len(in)-2 } -func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) { +func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes bool) (string, error) { line := strings.TrimLeftFunc(string(in), unicode.IsSpace) if len(line) == 0 { return "", nil @@ -204,6 +204,8 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo valQuote = `"""` } else if line[0] == '`' { valQuote = "`" + } else if unescapeValueDoubleQuotes && line[0] == '"' { + valQuote = `"` } if len(valQuote) > 0 { @@ -214,6 +216,9 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo return p.readMultilines(line, line[startIdx:], valQuote) } + if unescapeValueDoubleQuotes && valQuote == `"` { + return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil + } return line[startIdx : pos+startIdx], nil } @@ -234,7 +239,7 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo } } - // Trim single quotes + // Trim single and double quotes if hasSurroundedQuote(line, '\'') || hasSurroundedQuote(line, '"') { line = line[1 : len(line)-1] @@ -321,7 +326,10 @@ func (f *File) parse(reader io.Reader) (err error) { if err != nil { // Treat as boolean key when desired, and whole line is key name. if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { - kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + kname, err := p.readValue(line, + f.options.IgnoreContinuation, + f.options.IgnoreInlineComment, + f.options.UnescapeValueDoubleQuotes) if err != nil { return err } @@ -344,7 +352,10 @@ func (f *File) parse(reader io.Reader) (err error) { p.count++ } - value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + value, err := p.readValue(line[offset:], + f.options.IgnoreContinuation, + f.options.IgnoreInlineComment, + f.options.UnescapeValueDoubleQuotes) if err != nil { return err } diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go index 94f7375ed..d8a402619 100644 --- a/vendor/github.com/go-ini/ini/section.go +++ b/vendor/github.com/go-ini/ini/section.go @@ -54,6 +54,14 @@ func (s *Section) Body() string { return strings.TrimSpace(s.rawBody) } +// SetBody updates body content only if section is raw. +func (s *Section) SetBody(body string) { + if !s.isRawSection { + return + } + s.rawBody = body +} + // NewKey creates a new key to given section. func (s *Section) NewKey(name, val string) (*Key, error) { if len(name) == 0 { @@ -136,6 +144,7 @@ func (s *Section) HasKey(name string) bool { } // Haskey is a backwards-compatible name for HasKey. +// TODO: delete me in v2 func (s *Section) Haskey(name string) bool { return s.HasKey(name) } diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go index 031c78b8e..e0bff67eb 100644 --- a/vendor/github.com/go-ini/ini/struct.go +++ b/vendor/github.com/go-ini/ini/struct.go @@ -78,7 +78,7 @@ func parseDelim(actual string) string { var reflectTime = reflect.TypeOf(time.Now()).Kind() // setSliceWithProperType sets proper values to slice based on its type. 
-func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { var strs []string if allowShadow { strs = key.StringsWithShadows(delim) @@ -92,26 +92,30 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowSh } var vals interface{} + var err error sliceOf := field.Type().Elem().Kind() switch sliceOf { case reflect.String: vals = strs case reflect.Int: - vals, _ = key.parseInts(strs, true, false) + vals, err = key.parseInts(strs, true, false) case reflect.Int64: - vals, _ = key.parseInt64s(strs, true, false) + vals, err = key.parseInt64s(strs, true, false) case reflect.Uint: - vals, _ = key.parseUints(strs, true, false) + vals, err = key.parseUints(strs, true, false) case reflect.Uint64: - vals, _ = key.parseUint64s(strs, true, false) + vals, err = key.parseUint64s(strs, true, false) case reflect.Float64: - vals, _ = key.parseFloat64s(strs, true, false) + vals, err = key.parseFloat64s(strs, true, false) case reflectTime: - vals, _ = key.parseTimesFormat(time.RFC3339, strs, true, false) + vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) default: return fmt.Errorf("unsupported type '[]%s'", sliceOf) } + if err != nil && isStrict { + return err + } slice := reflect.MakeSlice(field.Type(), numVals, numVals) for i := 0; i < numVals; i++ { @@ -136,10 +140,17 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowSh return nil } +func wrapStrictError(err error, isStrict bool) error { + if isStrict { + return err + } + return nil +} + // setWithProperType sets proper value to field based on its type, // but it does not return error for failing parsing, // because we want to use default value that is already assigned to strcut. 
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { +func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { switch t.Kind() { case reflect.String: if len(key.String()) == 0 { @@ -149,7 +160,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri case reflect.Bool: boolVal, err := key.Bool() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.SetBool(boolVal) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: @@ -161,8 +172,8 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri } intVal, err := key.Int64() - if err != nil || intVal == 0 { - return nil + if err != nil { + return wrapStrictError(err, isStrict) } field.SetInt(intVal) // byte is an alias for uint8, so supporting uint8 breaks support for byte @@ -176,24 +187,24 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri uintVal, err := key.Uint64() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.SetUint(uintVal) case reflect.Float32, reflect.Float64: floatVal, err := key.Float64() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.SetFloat(floatVal) case reflectTime: timeVal, err := key.Time() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.Set(reflect.ValueOf(timeVal)) case reflect.Slice: - return setSliceWithProperType(key, field, delim, allowShadow) + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) default: return fmt.Errorf("unsupported type '%s'", t) } @@ -212,7 +223,7 @@ func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bo return rawName, omitEmpty, allowShadow } -func (s *Section) mapTo(val reflect.Value) error { +func (s *Section) mapTo(val reflect.Value, isStrict bool) error { if val.Kind() == reflect.Ptr { val = val.Elem() } @@ -241,7 +252,7 @@ func (s *Section) mapTo(val reflect.Value) error { if isAnonymous || isStruct { if sec, err := s.f.GetSection(fieldName); err == nil { - if err = sec.mapTo(field); err != nil { + if err = sec.mapTo(field, isStrict); err != nil { return fmt.Errorf("error mapping field(%s): %v", fieldName, err) } continue @@ -250,7 +261,7 @@ func (s *Section) mapTo(val reflect.Value) error { if key, err := s.GetKey(fieldName); err == nil { delim := parseDelim(tpField.Tag.Get("delim")) - if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { return fmt.Errorf("error mapping field(%s): %v", fieldName, err) } } @@ -269,7 +280,22 @@ func (s *Section) MapTo(v interface{}) error { return errors.New("cannot map to non-pointer struct") } - return s.mapTo(val) + return s.mapTo(val, false) +} + +// MapTo maps section to given struct in strict mode, +// which returns all possible error including value parsing error. +func (s *Section) StrictMapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val, true) } // MapTo maps file to given struct. 
@@ -277,6 +303,12 @@ func (f *File) MapTo(v interface{}) error { return f.Section("").MapTo(v) } +// MapTo maps file to given struct in strict mode, +// which returns all possible error including value parsing error. +func (f *File) StrictMapTo(v interface{}) error { + return f.Section("").StrictMapTo(v) +} + // MapTo maps data sources to given struct with name mapper. func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { cfg, err := Load(source, others...) @@ -287,11 +319,28 @@ func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, other return cfg.MapTo(v) } +// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, +// which returns all possible error including value parsing error. +func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.StrictMapTo(v) +} + // MapTo maps data sources to given struct. func MapTo(v, source interface{}, others ...interface{}) error { return MapToWithMapper(v, nil, source, others...) } +// StrictMapTo maps data sources to given struct in strict mode, +// which returns all possible error including value parsing error. +func StrictMapTo(v, source interface{}, others ...interface{}) error { + return StrictMapToWithMapper(v, nil, source, others...) +} + // reflectSliceWithProperType does the opposite thing as setSliceWithProperType. func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { slice := field.Slice(0, field.Len()) @@ -359,10 +408,11 @@ func isEmptyValue(v reflect.Value) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflectTime: - return v.Interface().(time.Time).IsZero() case reflect.Interface, reflect.Ptr: return v.IsNil() + case reflectTime: + t, ok := v.Interface().(time.Time) + return ok && t.IsZero() } return false } @@ -400,6 +450,12 @@ func (s *Section) reflectFrom(val reflect.Value) error { // Note: fieldName can never be empty here, ignore error. 
sec, _ = s.f.NewSection(fieldName) } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + if err = sec.reflectFrom(field); err != nil { return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } @@ -411,6 +467,12 @@ func (s *Section) reflectFrom(val reflect.Value) error { if err != nil { key, _ = s.NewKey(fieldName, "") } + + // Add comment from comment tag + if len(key.Comment) == 0 { + key.Comment = tpField.Tag.Get("comment") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } diff --git a/vendor/github.com/ovh/go-ovh/ovh/ovh.go b/vendor/github.com/ovh/go-ovh/ovh/ovh.go index 737c94470..c5f7f7265 100644 --- a/vendor/github.com/ovh/go-ovh/ovh/ovh.go +++ b/vendor/github.com/ovh/go-ovh/ovh/ovh.go @@ -188,9 +188,10 @@ func (c *Client) getResponse(response *http.Response, resType interface{}) error if response.StatusCode < http.StatusOK || response.StatusCode >= http.StatusMultipleChoices { apiError := &APIError{Code: response.StatusCode} if err = json.Unmarshal(body, apiError); err != nil { - return err + apiError.Message = string(body) } apiError.QueryID = response.Header.Get("X-Ovh-QueryID") + return apiError } diff --git a/vendor/github.com/pyr/egoscale/exo.go b/vendor/github.com/pyr/egoscale/exo.go deleted file mode 100644 index 7a3c445ce..000000000 --- a/vendor/github.com/pyr/egoscale/exo.go +++ /dev/null @@ -1,39 +0,0 @@ -package main - -import ( - "egoscale" - "fmt" - "flag" - "os" -) - -var apikey = flag.String("xk", "", "Exoscale API Key") -var apisecret = flag.String("xs", "", "Exoscale API Secret") -var endpoint = flag.String("xe", "https://api.exoscale.ch/compute", "Exoscale API Endpoint") - -func main() { - - flag.Parse() - client := egoscale.NewClient(*endpoint, *apikey, *apisecret) - - - vms, err := client.ListVirtualMachines() - if err != nil { - fmt.Printf("got error: %s\n", err) - os.Exit(1) - } - - for _, vm := range(vms) { - - fmt.Println("vm:", vm.Displayname) - for _, nic := range(vm.Nic) { - fmt.Println("ip:", nic.Ipaddress) - } - for _, sg := range(vm.SecurityGroups) { - fmt.Println("securitygroup:", sg.Name) - } - } - os.Exit(0) - - -} diff --git a/vendor/github.com/xenolf/lego/LICENSE b/vendor/github.com/xenolf/lego/LICENSE index 17460b716..270cba089 100644 --- a/vendor/github.com/xenolf/lego/LICENSE +++ b/vendor/github.com/xenolf/lego/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2015 Sebastian Erhart +Copyright (c) 2015-2017 Sebastian Erhart Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/xenolf/lego/acme/client.go b/vendor/github.com/xenolf/lego/acme/client.go index ef0f80b7f..bcb844371 100644 --- a/vendor/github.com/xenolf/lego/acme/client.go +++ b/vendor/github.com/xenolf/lego/acme/client.go @@ -330,6 +330,10 @@ DNSNames: challenges, failures := c.getChallenges(domains) // If any challenge fails - return. Do not generate partial SAN certificates. if len(failures) > 0 { + for _, auth := range challenges { + c.disableAuthz(auth) + } + return CertificateResource{}, failures } @@ -373,6 +377,10 @@ func (c *Client) ObtainCertificate(domains []string, bundle bool, privKey crypto challenges, failures := c.getChallenges(domains) // If any challenge fails - return. Do not generate partial SAN certificates. 
if len(failures) > 0 { + for _, auth := range challenges { + c.disableAuthz(auth) + } + return CertificateResource{}, failures } @@ -493,10 +501,12 @@ func (c *Client) solveChallenges(challenges []authorizationResource) map[string] // TODO: do not immediately fail if one domain fails to validate. err := solver.Solve(authz.Body.Challenges[i], authz.Domain) if err != nil { + c.disableAuthz(authz) failures[authz.Domain] = err } } } else { + c.disableAuthz(authz) failures[authz.Domain] = fmt.Errorf("[%s] acme: Could not determine solvers", authz.Domain) } } @@ -586,6 +596,13 @@ func logAuthz(authz []authorizationResource) { } } +// cleanAuthz loops through the passed in slice and disables any auths which are not "valid" +func (c *Client) disableAuthz(auth authorizationResource) error { + var disabledAuth authorization + _, err := postJSON(c.jws, auth.AuthURL, deactivateAuthMessage{Resource: "authz", Status: "deactivated"}, &disabledAuth) + return err +} + func (c *Client) requestCertificate(authz []authorizationResource, bundle bool, privKey crypto.PrivateKey, mustStaple bool) (CertificateResource, error) { if len(authz) == 0 { return CertificateResource{}, errors.New("Passed no authorizations to requestCertificate!") diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge.go b/vendor/github.com/xenolf/lego/acme/dns_challenge.go index 30f2170ff..7c4cb80de 100644 --- a/vendor/github.com/xenolf/lego/acme/dns_challenge.go +++ b/vendor/github.com/xenolf/lego/acme/dns_challenge.go @@ -194,7 +194,7 @@ func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) ( if err == dns.ErrTruncated { tcp := &dns.Client{Net: "tcp", Timeout: DNSTimeout} - // If the TCP request suceeds, the err will reset to nil + // If the TCP request succeeds, the err will reset to nil in, _, err = tcp.Exchange(m, ns) } diff --git a/vendor/github.com/xenolf/lego/acme/http.go b/vendor/github.com/xenolf/lego/acme/http.go index a858b5a75..fd6018a10 100644 --- a/vendor/github.com/xenolf/lego/acme/http.go +++ b/vendor/github.com/xenolf/lego/acme/http.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "net" "net/http" "runtime" "strings" @@ -15,7 +16,17 @@ import ( var UserAgent string // HTTPClient is an HTTP client with a reasonable timeout value. -var HTTPClient = http.Client{Timeout: 10 * time.Second} +var HTTPClient = http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 15 * time.Second, + ResponseHeaderTimeout: 15 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, +} const ( // defaultGoUserAgent is the Go HTTP package user agent string. Too diff --git a/vendor/github.com/xenolf/lego/acme/messages.go b/vendor/github.com/xenolf/lego/acme/messages.go index 36db3b217..79ccf154e 100644 --- a/vendor/github.com/xenolf/lego/acme/messages.go +++ b/vendor/github.com/xenolf/lego/acme/messages.go @@ -93,6 +93,11 @@ type revokeCertMessage struct { Certificate string `json:"certificate"` } +type deactivateAuthMessage struct { + Resource string `json:"resource,omitempty"` + Status string `jsom:"status"` +} + // CertificateResource represents a CA issued certificate. // PrivateKey, Certificate and IssuerCertificate are all // already PEM encoded and can be directly written to disk. 
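For reference, the go-ini bump above introduces a strict variant of its struct-mapping API (Section.StrictMapTo, File.StrictMapTo, StrictMapTo, StrictMapToWithMapper) that returns value-parsing errors instead of silently keeping zero values. A minimal usage sketch, assuming the go-ini package as vendored in this change; the struct and INI keys below are hypothetical and exist only to show the difference from the lenient MapTo:

package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

// appConfig is a hypothetical struct used only for illustration.
type appConfig struct {
	Port    int   `ini:"port"`
	Debug   bool  `ini:"debug"`
	Retries []int `ini:"retries" delim:","`
}

func main() {
	// "port" deliberately fails integer parsing.
	src := []byte("port = not-a-number\ndebug = true\nretries = 1,2,3\n")

	var lenient appConfig
	// MapTo swallows the parse error and leaves Port at its zero value.
	if err := ini.MapTo(&lenient, src); err != nil {
		fmt.Println("MapTo:", err)
	}
	fmt.Println("lenient.Port =", lenient.Port) // prints 0

	var strict appConfig
	// StrictMapTo (added by this go-ini update) reports the bad "port" value.
	if err := ini.StrictMapTo(&strict, src); err != nil {
		fmt.Println("StrictMapTo:", err)
	}
}

The same lenient-versus-strict split applies to the slice handling in setSliceWithProperType, so malformed list values are likewise only reported through the Strict* entry points.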
diff --git a/vendor/github.com/xenolf/lego/cli.go b/vendor/github.com/xenolf/lego/cli.go index 03589a233..3aac9e253 100644 --- a/vendor/github.com/xenolf/lego/cli.go +++ b/vendor/github.com/xenolf/lego/cli.go @@ -32,7 +32,7 @@ func main() { app.Name = "lego" app.Usage = "Let's Encrypt client written in Go" - version := "0.3.1" + version := "0.4.1" if strings.HasPrefix(gittag, "v") { version = gittag } @@ -109,7 +109,7 @@ func main() { app.Flags = []cli.Flag{ cli.StringSliceFlag{ Name: "domains, d", - Usage: "Add domains to the process", + Usage: "Add a domain to the process. Can be specified multiple times.", }, cli.StringFlag{ Name: "csr, c", @@ -209,18 +209,19 @@ Here is an example bash command using the CloudFlare DNS provider: fmt.Fprintln(w, "\tdnsmadeeasy:\tDNSMADEEASY_API_KEY, DNSMADEEASY_API_SECRET") fmt.Fprintln(w, "\texoscale:\tEXOSCALE_API_KEY, EXOSCALE_API_SECRET, EXOSCALE_ENDPOINT") fmt.Fprintln(w, "\tgandi:\tGANDI_API_KEY") - fmt.Fprintln(w, "\tgcloud:\tGCE_PROJECT") + fmt.Fprintln(w, "\tgcloud:\tGCE_PROJECT, GCE_SERVICE_ACCOUNT_FILE") fmt.Fprintln(w, "\tlinode:\tLINODE_API_KEY") fmt.Fprintln(w, "\tmanual:\tnone") fmt.Fprintln(w, "\tnamecheap:\tNAMECHEAP_API_USER, NAMECHEAP_API_KEY") fmt.Fprintln(w, "\trackspace:\tRACKSPACE_USER, RACKSPACE_API_KEY") fmt.Fprintln(w, "\trfc2136:\tRFC2136_TSIG_KEY, RFC2136_TSIG_SECRET,\n\t\tRFC2136_TSIG_ALGORITHM, RFC2136_NAMESERVER") - fmt.Fprintln(w, "\troute53:\tAWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION") + fmt.Fprintln(w, "\troute53:\tAWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, AWS_HOSTED_ZONE_ID") fmt.Fprintln(w, "\tdyn:\tDYN_CUSTOMER_NAME, DYN_USER_NAME, DYN_PASSWORD") fmt.Fprintln(w, "\tvultr:\tVULTR_API_KEY") fmt.Fprintln(w, "\tovh:\tOVH_ENDPOINT, OVH_APPLICATION_KEY, OVH_APPLICATION_SECRET, OVH_CONSUMER_KEY") fmt.Fprintln(w, "\tpdns:\tPDNS_API_KEY, PDNS_API_URL") fmt.Fprintln(w, "\tdnspod:\tDNSPOD_API_KEY") + fmt.Fprintln(w, "\totc:\tOTC_USER_NAME, OTC_PASSWORD, OTC_PROJECT_NAME, OTC_DOMAIN_NAME, OTC_IDENTITY_ENDPOINT") w.Flush() fmt.Println(` diff --git a/vendor/github.com/xenolf/lego/cli_handlers.go b/vendor/github.com/xenolf/lego/cli_handlers.go index dad19a144..79bbb37e5 100644 --- a/vendor/github.com/xenolf/lego/cli_handlers.go +++ b/vendor/github.com/xenolf/lego/cli_handlers.go @@ -114,7 +114,7 @@ func setup(c *cli.Context) (*Configuration, *Account, *acme.Client) { } if c.GlobalIsSet("dns") { - provider, err := dns.NewDNSChallengeProviderByName(c.GlobalString("dns")) + provider, err := dns.NewDNSChallengeProviderByName(c.GlobalString("dns")) if err != nil { logger().Fatal(err) } diff --git a/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go b/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go index d079d5e13..04897aa18 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go +++ b/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go @@ -12,6 +12,8 @@ import ( "strings" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/to" "github.com/xenolf/lego/acme" @@ -69,7 +71,9 @@ func (c *DNSProvider) Present(domain, token, keyAuth string) error { } rsc := dns.NewRecordSetsClient(c.subscriptionId) - rsc.Authorizer, err = c.newServicePrincipalTokenFromCredentials(azure.PublicCloud.ResourceManagerEndpoint) + spt, err := c.newServicePrincipalTokenFromCredentials(azure.PublicCloud.ResourceManagerEndpoint) + rsc.Authorizer = autorest.NewBearerAuthorizer(spt) + 
relative := toRelativeRecord(fqdn, acme.ToFqdn(zone)) rec := dns.RecordSet{ Name: &relative, @@ -103,7 +107,8 @@ func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { relative := toRelativeRecord(fqdn, acme.ToFqdn(zone)) rsc := dns.NewRecordSetsClient(c.subscriptionId) - rsc.Authorizer, err = c.newServicePrincipalTokenFromCredentials(azure.PublicCloud.ResourceManagerEndpoint) + spt, err := c.newServicePrincipalTokenFromCredentials(azure.PublicCloud.ResourceManagerEndpoint) + rsc.Authorizer = autorest.NewBearerAuthorizer(spt) _, err = rsc.Delete(c.resourceGroup, zone, relative, dns.TXT, "") if err != nil { return err @@ -120,8 +125,11 @@ func (c *DNSProvider) getHostedZoneID(fqdn string) (string, error) { } // Now we want to to Azure and get the zone. + spt, err := c.newServicePrincipalTokenFromCredentials(azure.PublicCloud.ResourceManagerEndpoint) + dc := dns.NewZonesClient(c.subscriptionId) - dc.Authorizer, err = c.newServicePrincipalTokenFromCredentials(azure.PublicCloud.ResourceManagerEndpoint) + dc.Authorizer = autorest.NewBearerAuthorizer(spt) + zone, err := dc.Get(c.resourceGroup, acme.UnFqdn(authZone)) if err != nil { @@ -134,10 +142,10 @@ func (c *DNSProvider) getHostedZoneID(fqdn string) (string, error) { // NewServicePrincipalTokenFromCredentials creates a new ServicePrincipalToken using values of the // passed credentials map. -func (c *DNSProvider) newServicePrincipalTokenFromCredentials(scope string) (*azure.ServicePrincipalToken, error) { - oauthConfig, err := azure.PublicCloud.OAuthConfigForTenant(c.tenantId) +func (c *DNSProvider) newServicePrincipalTokenFromCredentials(scope string) (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, c.tenantId) if err != nil { panic(err) } - return azure.NewServicePrincipalToken(*oauthConfig, c.clientId, c.clientSecret, scope) + return adal.NewServicePrincipalToken(*oauthConfig, c.clientId, c.clientSecret, scope) } diff --git a/vendor/github.com/xenolf/lego/providers/dns/dns_providers.go b/vendor/github.com/xenolf/lego/providers/dns/dns_providers.go index 33fca0fad..94c8879b2 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/dns_providers.go +++ b/vendor/github.com/xenolf/lego/providers/dns/dns_providers.go @@ -19,6 +19,7 @@ import ( "github.com/xenolf/lego/providers/dns/linode" "github.com/xenolf/lego/providers/dns/namecheap" "github.com/xenolf/lego/providers/dns/ns1" + "github.com/xenolf/lego/providers/dns/otc" "github.com/xenolf/lego/providers/dns/ovh" "github.com/xenolf/lego/providers/dns/pdns" "github.com/xenolf/lego/providers/dns/rackspace" @@ -48,7 +49,7 @@ func NewDNSChallengeProviderByName(name string) (acme.ChallengeProvider, error) case "dyn": provider, err = dyn.NewDNSProvider() case "exoscale": - provider, err = exoscale.NewDNSProvider() + provider, err = exoscale.NewDNSProvider() case "gandi": provider, err = gandi.NewDNSProvider() case "gcloud": @@ -73,6 +74,8 @@ func NewDNSChallengeProviderByName(name string) (acme.ChallengeProvider, error) provider, err = pdns.NewDNSProvider() case "ns1": provider, err = ns1.NewDNSProvider() + case "otc": + provider, err = otc.NewDNSProvider() default: err = fmt.Errorf("Unrecognised DNS provider: %s", name) } diff --git a/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go b/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go index 3b6b58d08..7b2fccc98 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go +++ 
b/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go @@ -7,7 +7,7 @@ import ( "fmt" "os" - "github.com/pyr/egoscale/src/egoscale" + "github.com/exoscale/egoscale" "github.com/xenolf/lego/acme" ) diff --git a/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go b/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go index ea6c0875c..ba753f6dc 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go +++ b/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go @@ -4,12 +4,14 @@ package googlecloud import ( "fmt" + "io/ioutil" "os" "time" "github.com/xenolf/lego/acme" "golang.org/x/net/context" + "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/dns/v1" @@ -22,9 +24,14 @@ type DNSProvider struct { } // NewDNSProvider returns a DNSProvider instance configured for Google Cloud -// DNS. Credentials must be passed in the environment variable: GCE_PROJECT. +// DNS. Project name must be passed in the environment variable: GCE_PROJECT. +// A Service Account file can be passed in the environment variable: +// GCE_SERVICE_ACCOUNT_FILE func NewDNSProvider() (*DNSProvider, error) { project := os.Getenv("GCE_PROJECT") + if saFile, ok := os.LookupEnv("GCE_SERVICE_ACCOUNT_FILE"); ok { + return NewDNSProviderServiceAccount(project, saFile) + } return NewDNSProviderCredentials(project) } @@ -49,6 +56,36 @@ func NewDNSProviderCredentials(project string) (*DNSProvider, error) { }, nil } +// NewDNSProviderServiceAccount uses the supplied service account JSON file to +// return a DNSProvider instance configured for Google Cloud DNS. +func NewDNSProviderServiceAccount(project string, saFile string) (*DNSProvider, error) { + if project == "" { + return nil, fmt.Errorf("Google Cloud project name missing") + } + if saFile == "" { + return nil, fmt.Errorf("Google Cloud Service Account file missing") + } + + dat, err := ioutil.ReadFile(saFile) + if err != nil { + return nil, fmt.Errorf("Unable to read Service Account file: %v", err) + } + conf, err := google.JWTConfigFromJSON(dat, dns.NdevClouddnsReadwriteScope) + if err != nil { + return nil, fmt.Errorf("Unable to acquire config: %v", err) + } + client := conf.Client(oauth2.NoContext) + + svc, err := dns.New(client) + if err != nil { + return nil, fmt.Errorf("Unable to create Google Cloud DNS service: %v", err) + } + return &DNSProvider{ + project: project, + client: svc, + }, nil +} + // Present creates a TXT record to fulfil the dns-01 challenge. 
func (c *DNSProvider) Present(domain, token, keyAuth string) error { fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) diff --git a/vendor/github.com/xenolf/lego/providers/dns/otc/mock.go b/vendor/github.com/xenolf/lego/providers/dns/otc/mock.go new file mode 100644 index 000000000..0f2acb4b4 --- /dev/null +++ b/vendor/github.com/xenolf/lego/providers/dns/otc/mock.go @@ -0,0 +1,152 @@ +package otc + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" +) + +var fakeOTCUserName = "test" +var fakeOTCPassword = "test" +var fakeOTCDomainName = "test" +var fakeOTCProjectName = "test" +var fakeOTCToken = "62244bc21da68d03ebac94e6636ff01f" + +type DNSMock struct { + t *testing.T + Server *httptest.Server + Mux *http.ServeMux +} + +func NewDNSMock(t *testing.T) *DNSMock { + return &DNSMock{ + t: t, + } +} + +// Setup creates the mock server +func (m *DNSMock) Setup() { + m.Mux = http.NewServeMux() + m.Server = httptest.NewServer(m.Mux) +} + +// ShutdownServer creates the mock server +func (m *DNSMock) ShutdownServer() { + m.Server.Close() +} + +func (m *DNSMock) HandleAuthSuccessfully() { + m.Mux.HandleFunc("/v3/auth/token", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Subject-Token", fakeOTCToken) + + fmt.Fprintf(w, `{ + "token": { + "catalog": [ + { + "type": "dns", + "id": "56cd81db1f8445d98652479afe07c5ba", + "name": "", + "endpoints": [ + { + "url": "%s", + "region": "eu-de", + "region_id": "eu-de", + "interface": "public", + "id": "0047a06690484d86afe04877074efddf" + } + ] + } + ] + }}`, m.Server.URL) + }) +} + +func (m *DNSMock) HandleListZonesSuccessfully() { + m.Mux.HandleFunc("/v2/zones", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "zones":[{ + "id":"123123" + }]} + `) + + assert.Equal(m.t, r.Method, "GET") + assert.Equal(m.t, r.URL.Path, "/v2/zones") + assert.Equal(m.t, r.URL.RawQuery, "name=example.com.") + assert.Equal(m.t, r.Header.Get("Content-Type"), "application/json") + }) +} + +func (m *DNSMock) HandleListZonesEmpty() { + m.Mux.HandleFunc("/v2/zones", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "zones":[ + ]} + `) + + assert.Equal(m.t, r.Method, "GET") + assert.Equal(m.t, r.URL.Path, "/v2/zones") + assert.Equal(m.t, r.URL.RawQuery, "name=example.com.") + assert.Equal(m.t, r.Header.Get("Content-Type"), "application/json") + }) +} + +func (m *DNSMock) HandleDeleteRecordsetsSuccessfully() { + m.Mux.HandleFunc("/v2/zones/123123/recordsets/321321", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "zones":[{ + "id":"123123" + }]} + `) + + assert.Equal(m.t, r.Method, "DELETE") + assert.Equal(m.t, r.URL.Path, "/v2/zones/123123/recordsets/321321") + assert.Equal(m.t, r.Header.Get("Content-Type"), "application/json") + }) +} + +func (m *DNSMock) HandleListRecordsetsEmpty() { + m.Mux.HandleFunc("/v2/zones/123123/recordsets", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "recordsets":[ + ]} + `) + + assert.Equal(m.t, r.URL.Path, "/v2/zones/123123/recordsets") + assert.Equal(m.t, r.URL.RawQuery, "type=TXT&name=_acme-challenge.example.com.") + }) +} +func (m *DNSMock) HandleListRecordsetsSuccessfully() { + m.Mux.HandleFunc("/v2/zones/123123/recordsets", func(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" { + fmt.Fprintf(w, `{ + "recordsets":[{ + "id":"321321" + }]} + `) + + assert.Equal(m.t, r.URL.Path, "/v2/zones/123123/recordsets") + assert.Equal(m.t, r.URL.RawQuery, 
"type=TXT&name=_acme-challenge.example.com.") + + } else if r.Method == "POST" { + body, err := ioutil.ReadAll(r.Body) + + assert.Nil(m.t, err) + exceptedString := "{\"name\":\"_acme-challenge.example.com.\",\"description\":\"Added TXT record for ACME dns-01 challenge using lego client\",\"type\":\"TXT\",\"ttl\":300,\"records\":[\"\\\"w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI\\\"\"]}" + assert.Equal(m.t, string(body), exceptedString) + + fmt.Fprintf(w, `{ + "recordsets":[{ + "id":"321321" + }]} + `) + + } else { + m.t.Errorf("Expected method to be 'GET' or 'POST' but got '%s'", r.Method) + } + + assert.Equal(m.t, r.Header.Get("Content-Type"), "application/json") + }) +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/otc/otc.go b/vendor/github.com/xenolf/lego/providers/dns/otc/otc.go new file mode 100644 index 000000000..86bcaa9b7 --- /dev/null +++ b/vendor/github.com/xenolf/lego/providers/dns/otc/otc.go @@ -0,0 +1,388 @@ +// Package otc implements a DNS provider for solving the DNS-01 challenge +// using Open Telekom Cloud Managed DNS. +package otc + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "time" + + "github.com/xenolf/lego/acme" +) + +// DNSProvider is an implementation of the acme.ChallengeProvider interface that uses +// OTC's Managed DNS API to manage TXT records for a domain. +type DNSProvider struct { + identityEndpoint string + otcBaseURL string + domainName string + projectName string + userName string + password string + token string +} + +// NewDNSProvider returns a DNSProvider instance configured for OTC DNS. +// Credentials must be passed in the environment variables: OTC_USER_NAME, +// OTC_DOMAIN_NAME, OTC_PASSWORD OTC_PROJECT_NAME and OTC_IDENTITY_ENDPOINT. +func NewDNSProvider() (*DNSProvider, error) { + domainName := os.Getenv("OTC_DOMAIN_NAME") + userName := os.Getenv("OTC_USER_NAME") + password := os.Getenv("OTC_PASSWORD") + projectName := os.Getenv("OTC_PROJECT_NAME") + identityEndpoint := os.Getenv("OTC_IDENTITY_ENDPOINT") + return NewDNSProviderCredentials(domainName, userName, password, projectName, identityEndpoint) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a +// DNSProvider instance configured for OTC DNS. 
+func NewDNSProviderCredentials(domainName, userName, password, projectName, identityEndpoint string) (*DNSProvider, error) { + if domainName == "" || userName == "" || password == "" || projectName == "" { + return nil, fmt.Errorf("OTC credentials missing") + } + + if identityEndpoint == "" { + identityEndpoint = "https://iam.eu-de.otc.t-systems.com:443/v3/auth/tokens" + } + + return &DNSProvider{ + identityEndpoint: identityEndpoint, + domainName: domainName, + userName: userName, + password: password, + projectName: projectName, + }, nil +} + +func (d *DNSProvider) SendRequest(method, resource string, payload interface{}) (io.Reader, error) { + url := fmt.Sprintf("%s/%s", d.otcBaseURL, resource) + + body, err := json.Marshal(payload) + if err != nil { + return nil, err + } + + req, err := http.NewRequest(method, url, bytes.NewReader(body)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + if len(d.token) > 0 { + req.Header.Set("X-Auth-Token", d.token) + } + + // Workaround for keep alive bug in otc api + tr := http.DefaultTransport.(*http.Transport) + tr.DisableKeepAlives = true + + client := &http.Client{ + Timeout: time.Duration(10 * time.Second), + Transport: tr, + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("OTC API request %s failed with HTTP status code %d", url, resp.StatusCode) + } + + body1, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return bytes.NewReader(body1), nil +} + +func (d *DNSProvider) loginRequest() error { + type nameResponse struct { + Name string `json:"name"` + } + + type userResponse struct { + Name string `json:"name"` + Password string `json:"password"` + Domain nameResponse `json:"domain"` + } + + type passwordResponse struct { + User userResponse `json:"user"` + } + type identityResponse struct { + Methods []string `json:"methods"` + Password passwordResponse `json:"password"` + } + + type scopeResponse struct { + Project nameResponse `json:"project"` + } + + type authResponse struct { + Identity identityResponse `json:"identity"` + Scope scopeResponse `json:"scope"` + } + + type loginResponse struct { + Auth authResponse `json:"auth"` + } + + userResp := userResponse{ + Name: d.userName, + Password: d.password, + Domain: nameResponse{ + Name: d.domainName, + }, + } + + loginResp := loginResponse{ + Auth: authResponse{ + Identity: identityResponse{ + Methods: []string{"password"}, + Password: passwordResponse{ + User: userResp, + }, + }, + Scope: scopeResponse{ + Project: nameResponse{ + Name: d.projectName, + }, + }, + }, + } + + body, err := json.Marshal(loginResp) + if err != nil { + return err + } + req, err := http.NewRequest("POST", d.identityEndpoint, bytes.NewReader(body)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{Timeout: time.Duration(10 * time.Second)} + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + return fmt.Errorf("OTC API request failed with HTTP status code %d", resp.StatusCode) + } + + d.token = resp.Header.Get("X-Subject-Token") + + if d.token == "" { + return fmt.Errorf("unable to get auth token") + } + + type endpointResponse struct { + Token struct { + Catalog []struct { + Type string `json:"type"` + Endpoints []struct { + URL string `json:"url"` + } `json:"endpoints"` + } `json:"catalog"` + } 
`json:"token"` + } + var endpointResp endpointResponse + + err = json.NewDecoder(resp.Body).Decode(&endpointResp) + if err != nil { + return err + } + + for _, v := range endpointResp.Token.Catalog { + if v.Type == "dns" { + for _, endpoint := range v.Endpoints { + d.otcBaseURL = fmt.Sprintf("%s/v2", endpoint.URL) + continue + } + } + } + + if d.otcBaseURL == "" { + return fmt.Errorf("unable to get dns endpoint") + } + + return nil +} + +// Starts a new OTC API Session. Authenticates using userName, password +// and receives a token to be used in for subsequent requests. +func (d *DNSProvider) login() error { + err := d.loginRequest() + if err != nil { + return err + } + + return nil +} + +func (d *DNSProvider) getZoneID(zone string) (string, error) { + type zoneItem struct { + ID string `json:"id"` + } + + type zonesResponse struct { + Zones []zoneItem `json:"zones"` + } + + resource := fmt.Sprintf("zones?name=%s", zone) + resp, err := d.SendRequest("GET", resource, nil) + if err != nil { + return "", err + } + + var zonesRes zonesResponse + err = json.NewDecoder(resp).Decode(&zonesRes) + if err != nil { + return "", err + } + + if len(zonesRes.Zones) < 1 { + return "", fmt.Errorf("zone %s not found", zone) + } + + if len(zonesRes.Zones) > 1 { + return "", fmt.Errorf("to many zones found") + } + + if zonesRes.Zones[0].ID == "" { + return "", fmt.Errorf("id not found") + } + + return zonesRes.Zones[0].ID, nil +} + +func (d *DNSProvider) getRecordSetID(zoneID string, fqdn string) (string, error) { + type recordSet struct { + ID string `json:"id"` + } + + type recordSetsResponse struct { + RecordSets []recordSet `json:"recordsets"` + } + + resource := fmt.Sprintf("zones/%s/recordsets?type=TXT&name=%s", zoneID, fqdn) + resp, err := d.SendRequest("GET", resource, nil) + if err != nil { + return "", err + } + + var recordSetsRes recordSetsResponse + err = json.NewDecoder(resp).Decode(&recordSetsRes) + if err != nil { + return "", err + } + + if len(recordSetsRes.RecordSets) < 1 { + return "", fmt.Errorf("record not found") + } + + if len(recordSetsRes.RecordSets) > 1 { + return "", fmt.Errorf("to many records found") + } + + if recordSetsRes.RecordSets[0].ID == "" { + return "", fmt.Errorf("id not found") + } + + return recordSetsRes.RecordSets[0].ID, nil +} + +func (d *DNSProvider) deleteRecordSet(zoneID, recordID string) error { + resource := fmt.Sprintf("zones/%s/recordsets/%s", zoneID, recordID) + + _, err := d.SendRequest("DELETE", resource, nil) + if err != nil { + return err + } + return nil +} + +// Present creates a TXT record using the specified parameters +func (d *DNSProvider) Present(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + + if ttl < 300 { + ttl = 300 // 300 is otc minimum value for ttl + } + + authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) + if err != nil { + return err + } + + err = d.login() + if err != nil { + return err + } + + zoneID, err := d.getZoneID(authZone) + if err != nil { + return fmt.Errorf("unable to get zone: %s", err) + } + + resource := fmt.Sprintf("zones/%s/recordsets", zoneID) + + type recordset struct { + Name string `json:"name"` + Description string `json:"description"` + Type string `json:"type"` + Ttl int `json:"ttl"` + Records []string `json:"records"` + } + + r1 := &recordset{ + Name: fqdn, + Description: "Added TXT record for ACME dns-01 challenge using lego client", + Type: "TXT", + Ttl: 300, + Records: []string{fmt.Sprintf("\"%s\"", value)}, + } + _, err = 
d.SendRequest("POST", resource, r1) + + if err != nil { + return err + } + + return nil +} + +// CleanUp removes the TXT record matching the specified parameters +func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, _, _ := acme.DNS01Record(domain, keyAuth) + + authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) + if err != nil { + return err + } + + err = d.login() + if err != nil { + return err + } + + zoneID, err := d.getZoneID(authZone) + + if err != nil { + return err + } + + recordID, err := d.getRecordSetID(zoneID, fqdn) + if err != nil { + return fmt.Errorf("unable go get record %s for zone %s: %s", fqdn, domain, err) + } + return d.deleteRecordSet(zoneID, recordID) +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go b/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go index 2b106a27e..13daa8c8a 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go +++ b/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go @@ -269,7 +269,7 @@ func (c *DNSProvider) makeRequest(method, uri string, body io.Reader) (json.RawM return r, nil } -// RackspaceRecords is the list of records sent/recieved from the DNS API +// RackspaceRecords is the list of records sent/received from the DNS API type RackspaceRecords struct { RackspaceRecord []RackspaceRecord `json:"records"` } diff --git a/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go b/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go index 43a95f18c..dde42ddf1 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go +++ b/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go @@ -20,27 +20,32 @@ type DNSProvider struct { tsigAlgorithm string tsigKey string tsigSecret string + timeout time.Duration } // NewDNSProvider returns a DNSProvider instance configured for rfc2136 -// dynamic update. Credentials must be passed in the environment variables: -// RFC2136_NAMESERVER, RFC2136_TSIG_ALGORITHM, RFC2136_TSIG_KEY and -// RFC2136_TSIG_SECRET. To disable TSIG authentication, leave the TSIG -// variables unset. RFC2136_NAMESERVER must be a network address in the form -// "host" or "host:port". +// dynamic update. Configured with environment variables: +// RFC2136_NAMESERVER: Network address in the form "host" or "host:port". +// RFC2136_TSIG_ALGORITHM: Defaults to hmac-md5.sig-alg.reg.int. (HMAC-MD5). +// See https://github.com/miekg/dns/blob/master/tsig.go for supported values. +// RFC2136_TSIG_KEY: Name of the secret key as defined in DNS server configuration. +// RFC2136_TSIG_SECRET: Secret key payload. +// RFC2136_TIMEOUT: DNS propagation timeout in time.ParseDuration format. (60s) +// To disable TSIG authentication, leave the RFC2136_TSIG* variables unset. func NewDNSProvider() (*DNSProvider, error) { nameserver := os.Getenv("RFC2136_NAMESERVER") tsigAlgorithm := os.Getenv("RFC2136_TSIG_ALGORITHM") tsigKey := os.Getenv("RFC2136_TSIG_KEY") tsigSecret := os.Getenv("RFC2136_TSIG_SECRET") - return NewDNSProviderCredentials(nameserver, tsigAlgorithm, tsigKey, tsigSecret) + timeout := os.Getenv("RFC2136_TIMEOUT") + return NewDNSProviderCredentials(nameserver, tsigAlgorithm, tsigKey, tsigSecret, timeout) } // NewDNSProviderCredentials uses the supplied credentials to return a // DNSProvider instance configured for rfc2136 dynamic update. To disable TSIG // authentication, leave the TSIG parameters as empty strings. 
// nameserver must be a network address in the form "host" or "host:port". -func NewDNSProviderCredentials(nameserver, tsigAlgorithm, tsigKey, tsigSecret string) (*DNSProvider, error) { +func NewDNSProviderCredentials(nameserver, tsigAlgorithm, tsigKey, tsigSecret, timeout string) (*DNSProvider, error) { if nameserver == "" { return nil, fmt.Errorf("RFC2136 nameserver missing") } @@ -65,9 +70,27 @@ func NewDNSProviderCredentials(nameserver, tsigAlgorithm, tsigKey, tsigSecret st d.tsigSecret = tsigSecret } + if timeout == "" { + d.timeout = 60 * time.Second + } else { + t, err := time.ParseDuration(timeout) + if err != nil { + return nil, err + } else if t < 0 { + return nil, fmt.Errorf("Invalid/negative RFC2136_TIMEOUT: %v", timeout) + } else { + d.timeout = t + } + } + return d, nil } +// Returns the timeout configured with RFC2136_TIMEOUT, or 60s. +func (d *DNSProvider) Timeout() (timeout, interval time.Duration) { + return d.timeout, 2 * time.Second +} + // Present creates a TXT record using the specified parameters func (r *DNSProvider) Present(domain, token, keyAuth string) error { fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) diff --git a/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go b/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go index f3e53a8e5..934f0a2d4 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go +++ b/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go @@ -5,6 +5,7 @@ package route53 import ( "fmt" "math/rand" + "os" "strings" "time" @@ -23,7 +24,8 @@ const ( // DNSProvider implements the acme.ChallengeProvider interface type DNSProvider struct { - client *route53.Route53 + client *route53.Route53 + hostedZoneID string } // customRetryer implements the client.Retryer interface by composing the @@ -58,14 +60,22 @@ func (d customRetryer) RetryRules(r *request.Request) time.Duration { // 2. Shared credentials file (defaults to ~/.aws/credentials) // 3. Amazon EC2 IAM role // +// If AWS_HOSTED_ZONE_ID is not set, Lego tries to determine the correct +// public hosted zone via the FQDN. 
+// // See also: https://github.com/aws/aws-sdk-go/wiki/configuring-sdk func NewDNSProvider() (*DNSProvider, error) { + hostedZoneID := os.Getenv("AWS_HOSTED_ZONE_ID") + r := customRetryer{} r.NumMaxRetries = maxRetries config := request.WithRetryer(aws.NewConfig(), r) client := route53.New(session.New(config)) - return &DNSProvider{client: client}, nil + return &DNSProvider{ + client: client, + hostedZoneID: hostedZoneID, + }, nil } // Present creates a TXT record using the specified parameters @@ -83,7 +93,7 @@ func (r *DNSProvider) CleanUp(domain, token, keyAuth string) error { } func (r *DNSProvider) changeRecord(action, fqdn, value string, ttl int) error { - hostedZoneID, err := getHostedZoneID(fqdn, r.client) + hostedZoneID, err := r.getHostedZoneID(fqdn) if err != nil { return fmt.Errorf("Failed to determine Route 53 hosted zone ID: %v", err) } @@ -124,7 +134,11 @@ func (r *DNSProvider) changeRecord(action, fqdn, value string, ttl int) error { }) } -func getHostedZoneID(fqdn string, client *route53.Route53) (string, error) { +func (r *DNSProvider) getHostedZoneID(fqdn string) (string, error) { + if r.hostedZoneID != "" { + return r.hostedZoneID, nil + } + authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) if err != nil { return "", err @@ -134,7 +148,7 @@ func getHostedZoneID(fqdn string, client *route53.Route53) (string, error) { reqParams := &route53.ListHostedZonesByNameInput{ DNSName: aws.String(acme.UnFqdn(authZone)), } - resp, err := client.ListHostedZonesByName(reqParams) + resp, err := r.client.ListHostedZonesByName(reqParams) if err != nil { return "", err } diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go index 0b9c9d706..fa66410d6 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -168,12 +168,20 @@ type Change struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Additions") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Change) MarshalJSON() ([]byte, error) { type noMethod Change raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ChangesListResponse: The response to a request to enumerate Changes @@ -209,12 +217,20 @@ type ChangesListResponse struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Changes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *ChangesListResponse) MarshalJSON() ([]byte, error) { type noMethod ChangesListResponse raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ManagedZone: A zone is a subtree of the DNS namespace under one @@ -243,7 +259,7 @@ type ManagedZone struct { Kind string `json:"kind,omitempty"` // Name: User assigned name for this resource. Must be unique within the - // project. The name must be 1-32 characters long, must begin with a + // project. The name must be 1-63 characters long, must begin with a // letter, end with a letter or digit, and only contain lowercase // letters, digits or dashes. Name string `json:"name,omitempty"` @@ -268,12 +284,20 @@ type ManagedZone struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ManagedZone) MarshalJSON() ([]byte, error) { type noMethod ManagedZone raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ManagedZonesListResponse struct { @@ -307,12 +331,20 @@ type ManagedZonesListResponse struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ManagedZonesListResponse) MarshalJSON() ([]byte, error) { type noMethod ManagedZonesListResponse raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Project: A project resource. The project is a top level container for @@ -344,12 +376,20 @@ type Project struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *Project) MarshalJSON() ([]byte, error) { type noMethod Project raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Quota: Limits associated with a Project. @@ -388,12 +428,20 @@ type Quota struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Quota) MarshalJSON() ([]byte, error) { type noMethod Quota raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ResourceRecordSet: A unit of data that will be returned by the DNS @@ -425,12 +473,20 @@ type ResourceRecordSet struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ResourceRecordSet) MarshalJSON() ([]byte, error) { type noMethod ResourceRecordSet raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ResourceRecordSetsListResponse struct { @@ -464,12 +520,20 @@ type ResourceRecordSetsListResponse struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ResourceRecordSetsListResponse) MarshalJSON() ([]byte, error) { type noMethod ResourceRecordSetsListResponse raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // method id "dns.changes.create": @@ -481,6 +545,7 @@ type ChangesCreateCall struct { change *Change urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Create: Atomically update the ResourceRecordSet collection. 
@@ -508,27 +573,37 @@ func (c *ChangesCreateCall) Context(ctx context.Context) *ChangesCreateCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChangesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.change) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "managedZone": c.managedZone, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "dns.changes.create" call. @@ -563,7 +638,8 @@ func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -614,6 +690,7 @@ type ChangesGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Fetch the representation of an existing Change. @@ -651,25 +728,36 @@ func (c *ChangesGetCall) Context(ctx context.Context) *ChangesGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChangesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes/{changeId}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "managedZone": c.managedZone, "changeId": c.changeId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "dns.changes.get" call. 
@@ -704,7 +792,8 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -760,6 +849,7 @@ type ChangesListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Enumerate Changes to a ResourceRecordSet collection. @@ -829,24 +919,35 @@ func (c *ChangesListCall) Context(ctx context.Context) *ChangesListCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChangesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "managedZone": c.managedZone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "dns.changes.list" call. @@ -881,7 +982,8 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -978,6 +1080,7 @@ type ManagedZonesCreateCall struct { managedzone *ManagedZone urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Create: Create a new ManagedZone. @@ -1004,26 +1107,36 @@ func (c *ManagedZonesCreateCall) Context(ctx context.Context) *ManagedZonesCreat return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZonesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "dns.managedZones.create" call. @@ -1058,7 +1171,8 @@ func (c *ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (*ManagedZone, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1100,6 +1214,7 @@ type ManagedZonesDeleteCall struct { managedZone string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Delete a previously created ManagedZone. @@ -1126,21 +1241,32 @@ func (c *ManagedZonesDeleteCall) Context(ctx context.Context) *ManagedZonesDelet return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZonesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "managedZone": c.managedZone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "dns.managedZones.delete" call. @@ -1195,6 +1321,7 @@ type ManagedZonesGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Fetch the representation of an existing ManagedZone. @@ -1231,24 +1358,35 @@ func (c *ManagedZonesGetCall) Context(ctx context.Context) *ManagedZonesGetCall return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZonesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "managedZone": c.managedZone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "dns.managedZones.get" call. @@ -1283,7 +1421,8 @@ func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, er HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1331,6 +1470,7 @@ type ManagedZonesListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Enumerate ManagedZones that have been created but not yet @@ -1390,23 +1530,34 @@ func (c *ManagedZonesListCall) Context(ctx context.Context) *ManagedZonesListCal return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZonesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "dns.managedZones.list" call. @@ -1441,7 +1592,8 @@ func (c *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (*ManagedZonesLi HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1519,6 +1671,7 @@ type ProjectsGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Fetch the representation of an existing Project. @@ -1554,23 +1707,34 @@ func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "dns.projects.get" call. @@ -1605,7 +1769,8 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1647,6 +1812,7 @@ type ResourceRecordSetsListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Enumerate ResourceRecordSets that have been created but not yet @@ -1715,24 +1881,35 @@ func (c *ResourceRecordSetsListCall) Context(ctx context.Context) *ResourceRecor return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourceRecordSetsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/rrsets") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "managedZone": c.managedZone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "dns.resourceRecordSets.list" call. 
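// Illustrative sketch (assumption, not part of the vendored change): the
// If-None-Match handling in the doRequest above lets a caller send a cached
// ETag and detect a 304 via googleapi.IsNotModified. Project and zone names
// are made up.
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	dns "google.golang.org/api/dns/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, dns.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := dns.New(client)
	if err != nil {
		log.Fatal(err)
	}

	call := svc.ResourceRecordSets.List("my-project", "my-zone").
		IfNoneMatch(`"cached-etag"`). // ends up in the If-None-Match request header
		Context(ctx)
	if _, err := call.Do(); googleapi.IsNotModified(err) {
		fmt.Println("record sets unchanged since last fetch")
	} else if err != nil {
		log.Fatal(err)
	}
}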
@@ -1767,7 +1944,8 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/gensupport/buffer.go index 4b8ec1424..992104911 100644 --- a/vendor/google.golang.org/api/gensupport/buffer.go +++ b/vendor/google.golang.org/api/gensupport/buffer.go @@ -11,8 +11,8 @@ import ( "google.golang.org/api/googleapi" ) -// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks. -type ResumableBuffer struct { +// MediaBuffer buffers data from an io.Reader to support uploading media in retryable chunks. +type MediaBuffer struct { media io.Reader chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. @@ -22,42 +22,42 @@ type ResumableBuffer struct { off int64 } -func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer { - return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)} +func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer { + return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)} } // Chunk returns the current buffered chunk, the offset in the underlying media // from which the chunk is drawn, and the size of the chunk. // Successive calls to Chunk return the same chunk between calls to Next. -func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { +func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { // There may already be data in chunk if Next has not been called since the previous call to Chunk. - if rb.err == nil && len(rb.chunk) == 0 { - rb.err = rb.loadChunk() + if mb.err == nil && len(mb.chunk) == 0 { + mb.err = mb.loadChunk() } - return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err + return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err } // loadChunk will read from media into chunk, up to the capacity of chunk. -func (rb *ResumableBuffer) loadChunk() error { - bufSize := cap(rb.chunk) - rb.chunk = rb.chunk[:bufSize] +func (mb *MediaBuffer) loadChunk() error { + bufSize := cap(mb.chunk) + mb.chunk = mb.chunk[:bufSize] read := 0 var err error for err == nil && read < bufSize { var n int - n, err = rb.media.Read(rb.chunk[read:]) + n, err = mb.media.Read(mb.chunk[read:]) read += n } - rb.chunk = rb.chunk[:read] + mb.chunk = mb.chunk[:read] return err } // Next advances to the next chunk, which will be returned by the next call to Chunk. // Calls to Next without a corresponding prior call to Chunk will have no effect. -func (rb *ResumableBuffer) Next() { - rb.off += int64(len(rb.chunk)) - rb.chunk = rb.chunk[0:0] +func (mb *MediaBuffer) Next() { + mb.off += int64(len(mb.chunk)) + mb.chunk = mb.chunk[0:0] } type readerTyper struct { diff --git a/vendor/google.golang.org/api/gensupport/header.go b/vendor/google.golang.org/api/gensupport/header.go new file mode 100644 index 000000000..cb5e67c77 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/header.go @@ -0,0 +1,22 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gensupport + +import ( + "fmt" + "runtime" + "strings" +) + +// GoogleClientHeader returns the value to use for the x-goog-api-client +// header, which is used internally by Google. +func GoogleClientHeader(generatorVersion, clientElement string) string { + elts := []string{"gl-go/" + strings.Replace(runtime.Version(), " ", "_", -1)} + if clientElement != "" { + elts = append(elts, clientElement) + } + elts = append(elts, fmt.Sprintf("gdcl/%s", generatorVersion)) + return strings.Join(elts, " ") +} diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/gensupport/json.go index dd7bcd2eb..c01e32189 100644 --- a/vendor/google.golang.org/api/gensupport/json.go +++ b/vendor/google.golang.org/api/gensupport/json.go @@ -12,29 +12,43 @@ import ( ) // MarshalJSON returns a JSON encoding of schema containing only selected fields. -// A field is selected if: -// * it has a non-empty value, or -// * its field name is present in forceSendFields, and -// * it is not a nil pointer or nil interface. +// A field is selected if any of the following is true: +// * it has a non-empty value +// * its field name is present in forceSendFields and it is not a nil pointer or nil interface +// * its field name is present in nullFields. // The JSON key for each selected field is taken from the field's json: struct tag. -func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { - if len(forceSendFields) == 0 { +func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { + if len(forceSendFields) == 0 && len(nullFields) == 0 { return json.Marshal(schema) } - mustInclude := make(map[string]struct{}) + mustInclude := make(map[string]bool) for _, f := range forceSendFields { - mustInclude[f] = struct{}{} + mustInclude[f] = true + } + useNull := make(map[string]bool) + useNullMaps := make(map[string]map[string]bool) + for _, nf := range nullFields { + parts := strings.SplitN(nf, ".", 2) + field := parts[0] + if len(parts) == 1 { + useNull[field] = true + } else { + if useNullMaps[field] == nil { + useNullMaps[field] = map[string]bool{} + } + useNullMaps[field][parts[1]] = true + } } - dataMap, err := schemaToMap(schema, mustInclude) + dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps) if err != nil { return nil, err } return json.Marshal(dataMap) } -func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) { +func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { m := make(map[string]interface{}) s := reflect.ValueOf(schema) st := s.Type() @@ -54,10 +68,36 @@ func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[strin v := s.Field(i) f := st.Field(i) + + if useNull[f.Name] { + if !isEmptyValue(v) { + return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name) + } + m[tag.apiName] = nil + continue + } + if !includeField(v, f, mustInclude) { continue } + // If map fields are explicitly set to null, use a map[string]interface{}. 
+ if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil { + ms, ok := v.Interface().(map[string]string) + if !ok { + return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name) + } + mi := map[string]interface{}{} + for k, v := range ms { + mi[k] = v + } + for k := range useNullMaps[f.Name] { + mi[k] = nil + } + m[tag.apiName] = mi + continue + } + // nil maps are treated as empty maps. if f.Type.Kind() == reflect.Map && v.IsNil() { m[tag.apiName] = map[string]string{} @@ -127,7 +167,7 @@ func parseJSONTag(val string) (jsonTag, error) { } // Reports whether the struct field "f" with value "v" should be included in JSON output. -func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool { +func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool { // The regular JSON encoding of a nil pointer is "null", which means "delete this field". // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. // However, many fields are not pointers, so there would be no way to delete these fields. @@ -144,8 +184,7 @@ func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string return false } - _, ok := mustInclude[f.Name] - return ok || !isEmptyValue(v) + return mustInclude[f.Name] || !isEmptyValue(v) } // isEmptyValue reports whether v is the empty value for its type. This diff --git a/vendor/google.golang.org/api/gensupport/jsonfloat.go b/vendor/google.golang.org/api/gensupport/jsonfloat.go new file mode 100644 index 000000000..cb02335d2 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/jsonfloat.go @@ -0,0 +1,57 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gensupport + +import ( + "encoding/json" + "errors" + "fmt" + "math" +) + +// JSONFloat64 is a float64 that supports proper unmarshaling of special float +// values in JSON, according to +// https://developers.google.com/protocol-buffers/docs/proto3#json. Although +// that is a proto-to-JSON spec, it applies to all Google APIs. +// +// The jsonpb package +// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has +// similar functionality, but only for direct translation from proto messages +// to JSON. 
+type JSONFloat64 float64 + +func (f *JSONFloat64) UnmarshalJSON(data []byte) error { + var ff float64 + if err := json.Unmarshal(data, &ff); err == nil { + *f = JSONFloat64(ff) + return nil + } + var s string + if err := json.Unmarshal(data, &s); err == nil { + switch s { + case "NaN": + ff = math.NaN() + case "Infinity": + ff = math.Inf(1) + case "-Infinity": + ff = math.Inf(-1) + default: + return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s) + } + *f = JSONFloat64(ff) + return nil + } + return errors.New("google.golang.org/api/internal: data not float or string") +} diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go index 817f46f5d..f3e77fc52 100644 --- a/vendor/google.golang.org/api/gensupport/media.go +++ b/vendor/google.golang.org/api/gensupport/media.go @@ -174,27 +174,126 @@ func typeHeader(contentType string) textproto.MIMEHeader { // PrepareUpload determines whether the data in the supplied reader should be // uploaded in a single request, or in sequential chunks. // chunkSize is the size of the chunk that media should be split into. -// If chunkSize is non-zero and the contents of media do not fit in a single -// chunk (or there is an error reading media), then media will be returned as a -// ResumableBuffer. Otherwise, media will be returned as a Reader. +// +// If chunkSize is zero, media is returned as the first value, and the other +// two return values are nil, true. +// +// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the +// contents of media fit in a single chunk. // // After PrepareUpload has been called, media should no longer be used: the // media content should be accessed via one of the return values. -func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, - *ResumableBuffer) { +func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) { if chunkSize == 0 { // do not chunk - return media, nil + return media, nil, true + } + mb = NewMediaBuffer(media, chunkSize) + _, _, _, err := mb.Chunk() + // If err is io.EOF, we can upload this in a single request. Otherwise, err is + // either nil or a non-EOF error. If it is the latter, then the next call to + // mb.Chunk will return the same error. Returning a MediaBuffer ensures that this + // error will be handled at some point. + return nil, mb, err == io.EOF +} + +// MediaInfo holds information for media uploads. It is intended for use by generated +// code only. +type MediaInfo struct { + // At most one of Media and MediaBuffer will be set. + media io.Reader + buffer *MediaBuffer + singleChunk bool + mType string + size int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater googleapi.ProgressUpdater +} + +// NewInfoFromMedia should be invoked from the Media method of a call. It returns a +// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer +// if needed. +func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { + mi := &MediaInfo{} + opts := googleapi.ProcessMediaOptions(options) + if !opts.ForceEmptyContentType { + r, mi.mType = DetermineContentType(r, opts.ContentType) + } + mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) + return mi +} + +// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a +// call. It returns a MediaInfo using the given reader, size and media type. 
+func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { + rdr := ReaderAtToReader(r, size) + rdr, mType := DetermineContentType(rdr, mediaType) + return &MediaInfo{ + size: size, + mType: mType, + buffer: NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize), + media: nil, + singleChunk: false, + } +} + +func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) { + if mi != nil { + mi.progressUpdater = pu + } +} + +// UploadType determines the type of upload: a single request, or a resumable +// series of requests. +func (mi *MediaInfo) UploadType() string { + if mi.singleChunk { + return "multipart" + } + return "resumable" +} + +// UploadRequest sets up an HTTP request for media upload. It adds headers +// as necessary, and returns a replacement for the body. +func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, cleanup func()) { + cleanup = func() {} + if mi == nil { + return body, cleanup + } + var media io.Reader + if mi.media != nil { + // This only happens when the caller has turned off chunking. In that + // case, we write all of media in a single non-retryable request. + media = mi.media + } else if mi.singleChunk { + // The data fits in a single chunk, which has now been read into the MediaBuffer. + // We obtain that chunk so we can write it in a single request. The request can + // be retried because the data is stored in the MediaBuffer. + media, _, _, _ = mi.buffer.Chunk() + } + if media != nil { + combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) + cleanup = func() { combined.Close() } + reqHeaders.Set("Content-Type", ctype) + body = combined + } + if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + reqHeaders.Set("X-Upload-Content-Type", mi.mType) + } + return body, cleanup +} + +// ResumableUpload returns an appropriately configured ResumableUpload value if the +// upload is resumable, or nil otherwise. +func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { + if mi == nil || mi.singleChunk { + return nil + } + return &ResumableUpload{ + URI: locURI, + Media: mi.buffer, + MediaType: mi.mType, + Callback: func(curr int64) { + if mi.progressUpdater != nil { + mi.progressUpdater(curr, mi.size) + } + }, } - - rb := NewResumableBuffer(media, chunkSize) - rdr, _, _, err := rb.Chunk() - - if err == io.EOF { // we can upload this in a single request - return rdr, nil - } - // err might be a non-EOF error. If it is, the next call to rb.Chunk will - // return the same error. Returning a ResumableBuffer ensures that this error - // will be handled at some point. - - return nil, rb } diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go index b3e774aa4..dcd591f7f 100644 --- a/vendor/google.golang.org/api/gensupport/resumable.go +++ b/vendor/google.golang.org/api/gensupport/resumable.go @@ -5,6 +5,7 @@ package gensupport import ( + "errors" "fmt" "io" "net/http" @@ -12,14 +13,9 @@ import ( "time" "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" ) const ( - // statusResumeIncomplete is the code returned by the Google uploader - // when the transfer is not yet complete. - statusResumeIncomplete = 308 - // statusTooManyRequests is returned by the storage API if the // per-project limits have been temporarily exceeded. The request // should be retried. 
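// Illustrative sketch (assumption, not part of the vendored change): how the
// reworked PrepareUpload/MediaBuffer above behave. With chunkSize == 0 the raw
// reader is returned untouched; otherwise the data is staged in a MediaBuffer
// and singleChunk reports whether it fits in one chunk.
package main

import (
	"fmt"
	"strings"

	"google.golang.org/api/gensupport"
)

func main() {
	// No chunking requested: the original reader comes back, no buffer.
	r, mb, single := gensupport.PrepareUpload(strings.NewReader("hello, world"), 0)
	fmt.Println(r != nil, mb == nil, single) // true true true

	// 12 bytes of media, 16-byte chunks: everything fits in a single chunk.
	_, mb, single = gensupport.PrepareUpload(strings.NewReader("hello, world"), 16)
	chunk, off, size, err := mb.Chunk() // same chunk until Next() is called
	fmt.Println(off, size, err, single) // 0 12 EOF true
	_ = chunk
}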
@@ -35,7 +31,7 @@ type ResumableUpload struct { URI string UserAgent string // User-Agent for header of the request // Media is the object being uploaded. - Media *ResumableBuffer + Media *MediaBuffer // MediaType defines the media type, e.g. "image/jpeg". MediaType string @@ -80,8 +76,23 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, req.Header.Set("Content-Range", contentRange) req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) - return ctxhttp.Do(ctx, rx.Client, req) + // Google's upload endpoint uses status code 308 for a + // different purpose than the "308 Permanent Redirect" + // since-standardized in RFC 7238. Because of the conflict in + // semantics, Google added this new request header which + // causes it to not use "308" and instead reply with 200 OK + // and sets the upload-specific "X-HTTP-Status-Code-Override: + // 308" response header. + req.Header.Set("X-GUploader-No-308", "yes") + + return SendRequest(ctx, rx.Client, req) +} + +func statusResumeIncomplete(resp *http.Response) bool { + // This is how the server signals "status resume incomplete" + // when X-GUploader-No-308 is set to "yes": + return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308" } // reportProgress calls a user-supplied callback to report upload progress. @@ -112,11 +123,17 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e return res, err } - if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK { + // We sent "X-GUploader-No-308: yes" (see comment elsewhere in + // this file), so we don't expect to get a 308. + if res.StatusCode == 308 { + return nil, errors.New("unexpected 308 response status code") + } + + if res.StatusCode == http.StatusOK { rx.reportProgress(off, off+int64(size)) } - if res.StatusCode == statusResumeIncomplete { + if statusResumeIncomplete(res) { rx.Media.Next() } return res, nil @@ -135,6 +152,8 @@ func contextDone(ctx context.Context) bool { // It retries using the provided back off strategy until cancelled or the // strategy indicates to stop retrying. // It is called from the auto-generated API code and is not visible to the user. +// Before sending an HTTP request, Upload calls any registered hook functions, +// and calls the returned functions after the request returns (see send.go). // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { @@ -176,7 +195,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err // If the chunk was uploaded successfully, but there's still // more to go, upload the next chunk without any delay. - if status == statusResumeIncomplete { + if statusResumeIncomplete(resp) { pause = 0 backoff.Reset() resp.Body.Close() diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go index 7f83d1da9..c60b3c394 100644 --- a/vendor/google.golang.org/api/gensupport/retry.go +++ b/vendor/google.golang.org/api/gensupport/retry.go @@ -1,3 +1,17 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package gensupport import ( @@ -55,23 +69,17 @@ func DefaultBackoffStrategy() BackoffStrategy { // shouldRetry returns true if the HTTP response / error indicates that the // request should be attempted again. func shouldRetry(status int, err error) bool { - // Retry for 5xx response codes. - if 500 <= status && status < 600 { + if 500 <= status && status <= 599 { return true } - - // Retry on statusTooManyRequests{ if status == statusTooManyRequests { return true } - - // Retry on unexpected EOFs and temporary network errors. if err == io.ErrUnexpectedEOF { return true } if err, ok := err.(net.Error); ok { return err.Temporary() } - return false } diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go new file mode 100644 index 000000000..092044f44 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/send.go @@ -0,0 +1,61 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + "net/http" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// Hook is the type of a function that is called once before each HTTP request +// that is sent by a generated API. It returns a function that is called after +// the request returns. +// Hooks are not called if the context is nil. +type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response) + +var hooks []Hook + +// RegisterHook registers a Hook to be called before each HTTP request by a +// generated API. Hooks are called in the order they are registered. Each +// hook can return a function; if it is non-nil, it is called after the HTTP +// request returns. These functions are called in the reverse order. +// RegisterHook should not be called concurrently with itself or SendRequest. +func RegisterHook(h Hook) { + hooks = append(hooks, h) +} + +// SendRequest sends a single HTTP request using the given client. +// If ctx is non-nil, it calls all hooks, then sends the request with +// ctxhttp.Do, then calls any functions returned by the hooks in reverse order. +func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Disallow Accept-Encoding because it interferes with the automatic gzip handling + // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. + if _, ok := req.Header["Accept-Encoding"]; ok { + return nil, errors.New("google api: custom Accept-Encoding headers not allowed") + } + if ctx == nil { + return client.Do(req) + } + // Call hooks in order of registration, store returned funcs. + post := make([]func(resp *http.Response), len(hooks)) + for i, h := range hooks { + fn := h(ctx, req) + post[i] = fn + } + + // Send request. + resp, err := ctxhttp.Do(ctx, client, req) + + // Call returned funcs in reverse order. 
+ for i := len(post) - 1; i >= 0; i-- { + if fn := post[i]; fn != nil { + fn(resp) + } + } + return resp, err +} diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index fd556730c..f6e15be35 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -50,7 +50,7 @@ const ( // UserAgent is the header string used to identify this package. UserAgent = "google-api-go-client/" + Version - // The default chunk size to use for resumable uplods if not specified by the user. + // The default chunk size to use for resumable uploads if not specified by the user. DefaultUploadChunkSize = 8 * 1024 * 1024 // The minimum chunk size that can be used for resumable uploads. All @@ -149,12 +149,12 @@ func IsNotModified(err error) bool { // CheckMediaResponse returns an error (of type *Error) if the response // status code is not 2xx. Unlike CheckResponse it does not assume the // body is a JSON error document. +// It is the caller's responsibility to close res.Body. func CheckMediaResponse(res *http.Response) error { if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) - res.Body.Close() return &Error{ Code: res.StatusCode, Body: string(slurp), @@ -278,41 +278,15 @@ func ResolveRelative(basestr, relstr string) string { return us } -// has4860Fix is whether this Go environment contains the fix for -// http://golang.org/issue/4860 -var has4860Fix bool - -// init initializes has4860Fix by checking the behavior of the net/http package. -func init() { - r := http.Request{ - URL: &url.URL{ - Scheme: "http", - Opaque: "//opaque", - }, - } - b := &bytes.Buffer{} - r.Write(b) - has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http")) -} - -// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it -// don't alter any hex-escaped characters in u.Path. -func SetOpaque(u *url.URL) { - u.Opaque = "//" + u.Host + u.Path - if !has4860Fix { - u.Opaque = u.Scheme + ":" + u.Opaque - } -} - // Expand subsitutes any {encoded} strings in the URL passed in using // the map supplied. // // This calls SetOpaque to avoid encoding of the parameters in the URL path. func Expand(u *url.URL, expansions map[string]string) { - expanded, err := uritemplates.Expand(u.Path, expansions) + escaped, unescaped, err := uritemplates.Expand(u.Path, expansions) if err == nil { - u.Path = expanded - SetOpaque(u) + u.Path = unescaped + u.RawPath = escaped } } @@ -421,4 +395,12 @@ type userIP string func (i userIP) Get() (string, string) { return "userIp", string(i) } +// Trace returns a CallOption that enables diagnostic tracing for a call. +// traceToken is an ID supplied by Google support. 
+func Trace(traceToken string) CallOption { return traceTok(traceToken) } + +type traceTok string + +func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) } + // TODO: Fields too diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go index 7c103ba13..63bf05383 100644 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go @@ -34,11 +34,37 @@ func pctEncode(src []byte) []byte { return dst } -func escape(s string, allowReserved bool) string { +// pairWriter is a convenience struct which allows escaped and unescaped +// versions of the template to be written in parallel. +type pairWriter struct { + escaped, unescaped bytes.Buffer +} + +// Write writes the provided string directly without any escaping. +func (w *pairWriter) Write(s string) { + w.escaped.WriteString(s) + w.unescaped.WriteString(s) +} + +// Escape writes the provided string, escaping the string for the +// escaped output. +func (w *pairWriter) Escape(s string, allowReserved bool) { + w.unescaped.WriteString(s) if allowReserved { - return string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) } - return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) +} + +// Escaped returns the escaped string. +func (w *pairWriter) Escaped() string { + return w.escaped.String() +} + +// Unescaped returns the unescaped string. +func (w *pairWriter) Unescaped() string { + return w.unescaped.String() } // A uriTemplate is a parsed representation of a URI template. @@ -170,18 +196,20 @@ func parseTerm(term string) (result templateTerm, err error) { return result, err } -// Expand expands a URI template with a set of values to produce a string. -func (t *uriTemplate) Expand(values map[string]string) string { - var buf bytes.Buffer +// Expand expands a URI template with a set of values to produce the +// resultant URI. Two forms of the result are returned: one with all the +// elements escaped, and one with the elements unescaped. 
+func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) { + var w pairWriter for _, p := range t.parts { - p.expand(&buf, values) + p.expand(&w, values) } - return buf.String() + return w.Escaped(), w.Unescaped() } -func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) { +func (tp *templatePart) expand(w *pairWriter, values map[string]string) { if len(tp.raw) > 0 { - buf.WriteString(tp.raw) + w.Write(tp.raw) return } var first = true @@ -191,30 +219,30 @@ func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) { continue } if first { - buf.WriteString(tp.first) + w.Write(tp.first) first = false } else { - buf.WriteString(tp.sep) + w.Write(tp.sep) } - tp.expandString(buf, term, value) + tp.expandString(w, term, value) } } -func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { +func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) { if tp.named { - buf.WriteString(name) + w.Write(name) if empty { - buf.WriteString(tp.ifemp) + w.Write(tp.ifemp) } else { - buf.WriteString("=") + w.Write("=") } } } -func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { +func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) { if len(s) > t.truncate && t.truncate > 0 { s = s[:t.truncate] } - tp.expandName(buf, t.name, len(s) == 0) - buf.WriteString(escape(s, tp.allowReserved)) + tp.expandName(w, t.name, len(s) == 0) + w.Escape(s, tp.allowReserved) } diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go index eff260a69..2e70b8154 100644 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go @@ -4,10 +4,14 @@ package uritemplates -func Expand(path string, values map[string]string) (string, error) { +// Expand parses then expands a URI template with a set of values to produce +// the resultant URI. Two forms of the result are returned: one with all the +// elements escaped, and one with the elements unescaped. +func Expand(path string, values map[string]string) (escaped, unescaped string, err error) { template, err := parse(path) if err != nil { - return "", err + return "", "", err } - return template.Expand(values), nil + escaped, unescaped = template.Expand(values) + return escaped, unescaped, nil } diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go index a02b4b071..c8fdd5416 100644 --- a/vendor/google.golang.org/api/googleapi/types.go +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -6,6 +6,7 @@ package googleapi import ( "encoding/json" + "errors" "strconv" ) @@ -149,6 +150,25 @@ func (s Float64s) MarshalJSON() ([]byte, error) { }) } +// RawMessage is a raw encoded JSON value. +// It is identical to json.RawMessage, except it does not suffer from +// https://golang.org/issue/14493. +type RawMessage []byte + +// MarshalJSON returns m. +func (m RawMessage) MarshalJSON() ([]byte, error) { + return m, nil +} + +// UnmarshalJSON sets *m to a copy of data. +func (m *RawMessage) UnmarshalJSON(data []byte) error { + if m == nil { + return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer") + } + *m = append((*m)[:0], data...) + return nil +} + /* * Helper routines for simplifying the creation of optional fields of basic type. 
*/ diff --git a/vendor/gopkg.in/ini.v1/LICENSE b/vendor/gopkg.in/ini.v1/LICENSE index 37ec93a14..d361bbcdf 100644 --- a/vendor/gopkg.in/ini.v1/LICENSE +++ b/vendor/gopkg.in/ini.v1/LICENSE @@ -176,7 +176,7 @@ recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2014 Unknwon Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go index 5211d5abc..07dc62c50 100644 --- a/vendor/gopkg.in/ini.v1/ini.go +++ b/vendor/gopkg.in/ini.v1/ini.go @@ -24,10 +24,8 @@ import ( "os" "regexp" "runtime" - "strconv" "strings" "sync" - "time" ) const ( @@ -37,7 +35,7 @@ const ( // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - _VERSION = "1.27.0" + _VERSION = "1.30.0" ) // Version returns current package version literal. @@ -60,6 +58,9 @@ var ( // Explicitly write DEFAULT section header DefaultHeader = false + + // Indicate whether to put a line between sections + PrettySection = true ) func init() { @@ -180,6 +181,8 @@ type LoadOptions struct { AllowBooleanKeys bool // AllowShadows indicates whether to keep track of keys with same name under same section. AllowShadows bool + // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" + UnescapeValueDoubleQuotes bool // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise // conform to key/value pairs. Specify the names of those blocks here. UnparseableSections []string @@ -395,10 +398,7 @@ func (f *File) Append(source interface{}, others ...interface{}) error { return f.Reload() } -// WriteToIndent writes content into io.Writer with given indention. -// If PrettyFormat has been set to be true, -// it will align "=" sign with spaces under each section. 
-func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { +func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { equalSign := "=" if PrettyFormat { equalSign = " = " @@ -411,15 +411,17 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { if len(sec.Comment) > 0 { if sec.Comment[0] != '#' && sec.Comment[0] != ';' { sec.Comment = "; " + sec.Comment + } else { + sec.Comment = sec.Comment[:1] + " " + strings.TrimSpace(sec.Comment[1:]) } - if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { - return 0, err + if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil { + return nil, err } } if i > 0 || DefaultHeader { - if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return 0, err + if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return nil, err } } else { // Write nothing if default section is empty @@ -429,8 +431,8 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { } if sec.isRawSection { - if _, err = buf.WriteString(sec.rawBody); err != nil { - return 0, err + if _, err := buf.WriteString(sec.rawBody); err != nil { + return nil, err } continue } @@ -465,9 +467,11 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { } if key.Comment[0] != '#' && key.Comment[0] != ';' { key.Comment = "; " + key.Comment + } else { + key.Comment = key.Comment[:1] + " " + strings.TrimSpace(key.Comment[1:]) } - if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { - return 0, err + if _, err := buf.WriteString(key.Comment + LineBreak); err != nil { + return nil, err } } @@ -485,8 +489,8 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { } for _, val := range key.ValueWithShadows() { - if _, err = buf.WriteString(kname); err != nil { - return 0, err + if _, err := buf.WriteString(kname); err != nil { + return nil, err } if key.isBooleanType { @@ -504,21 +508,34 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { // In case key value contains "\n", "`", "\"", "#" or ";" if strings.ContainsAny(val, "\n`") { val = `"""` + val + `"""` - } else if strings.ContainsAny(val, "#;") { + } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { val = "`" + val + "`" } - if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { - return 0, err + if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return nil, err } } } - // Put a line between sections - if _, err = buf.WriteString(LineBreak); err != nil { - return 0, err + if PrettySection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } } } + return buf, nil +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { + buf, err := f.writeToBuffer(indent) + if err != nil { + return 0, err + } return buf.WriteTo(w) } @@ -531,23 +548,12 @@ func (f *File) WriteTo(w io.Writer) (int64, error) { func (f *File) SaveToIndent(filename, indent string) error { // Note: Because we are truncating with os.Create, // so it's safer to save to a temporary file location and rename afte done. - tmpPath := filename + "." 
+ strconv.Itoa(time.Now().Nanosecond()) + ".tmp" - defer os.Remove(tmpPath) - - fw, err := os.Create(tmpPath) + buf, err := f.writeToBuffer(indent) if err != nil { return err } - if _, err = f.WriteToIndent(fw, indent); err != nil { - fw.Close() - return err - } - fw.Close() - - // Remove old file and rename the new one. - os.Remove(filename) - return os.Rename(tmpPath, filename) + return ioutil.WriteFile(filename, buf.Bytes(), 0666) } // SaveTo writes content to file system. diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/gopkg.in/ini.v1/key.go index 838356af0..0a2aa30e9 100644 --- a/vendor/gopkg.in/ini.v1/key.go +++ b/vendor/gopkg.in/ini.v1/key.go @@ -15,6 +15,7 @@ package ini import ( + "bytes" "errors" "fmt" "strconv" @@ -444,11 +445,39 @@ func (k *Key) Strings(delim string) []string { return []string{} } - vals := strings.Split(str, delim) - for i := range vals { - // vals[i] = k.transformValue(strings.TrimSpace(vals[i])) - vals[i] = strings.TrimSpace(vals[i]) + runes := []rune(str) + vals := make([]string, 0, 2) + var buf bytes.Buffer + escape := false + idx := 0 + for { + if escape { + escape = false + if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { + buf.WriteRune('\\') + } + buf.WriteRune(runes[idx]) + } else { + if runes[idx] == '\\' { + escape = true + } else if strings.HasPrefix(string(runes[idx:]), delim) { + idx += len(delim) - 1 + vals = append(vals, strings.TrimSpace(buf.String())) + buf.Reset() + } else { + buf.WriteRune(runes[idx]) + } + } + idx += 1 + if idx == len(runes) { + break + } } + + if buf.Len() > 0 { + vals = append(vals, strings.TrimSpace(buf.String())) + } + return vals } diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go index 6c0b10745..861e366e8 100644 --- a/vendor/gopkg.in/ini.v1/parser.go +++ b/vendor/gopkg.in/ini.v1/parser.go @@ -189,11 +189,11 @@ func (p *parser) readContinuationLines(val string) (string, error) { // are quotes \" or \'. // It returns false if any other parts also contain same kind of quotes. 
func hasSurroundedQuote(in string, quote byte) bool { - return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && strings.IndexByte(in[1:], quote) == len(in)-2 } -func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) { +func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes bool) (string, error) { line := strings.TrimLeftFunc(string(in), unicode.IsSpace) if len(line) == 0 { return "", nil @@ -204,6 +204,8 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo valQuote = `"""` } else if line[0] == '`' { valQuote = "`" + } else if unescapeValueDoubleQuotes && line[0] == '"' { + valQuote = `"` } if len(valQuote) > 0 { @@ -214,6 +216,9 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo return p.readMultilines(line, line[startIdx:], valQuote) } + if unescapeValueDoubleQuotes && valQuote == `"` { + return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil + } return line[startIdx : pos+startIdx], nil } @@ -234,7 +239,7 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo } } - // Trim single quotes + // Trim single and double quotes if hasSurroundedQuote(line, '\'') || hasSurroundedQuote(line, '"') { line = line[1 : len(line)-1] @@ -321,7 +326,10 @@ func (f *File) parse(reader io.Reader) (err error) { if err != nil { // Treat as boolean key when desired, and whole line is key name. if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { - kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + kname, err := p.readValue(line, + f.options.IgnoreContinuation, + f.options.IgnoreInlineComment, + f.options.UnescapeValueDoubleQuotes) if err != nil { return err } @@ -344,7 +352,10 @@ func (f *File) parse(reader io.Reader) (err error) { p.count++ } - value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + value, err := p.readValue(line[offset:], + f.options.IgnoreContinuation, + f.options.IgnoreInlineComment, + f.options.UnescapeValueDoubleQuotes) if err != nil { return err } diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go index 031c78b8e..e0bff67eb 100644 --- a/vendor/gopkg.in/ini.v1/struct.go +++ b/vendor/gopkg.in/ini.v1/struct.go @@ -78,7 +78,7 @@ func parseDelim(actual string) string { var reflectTime = reflect.TypeOf(time.Now()).Kind() // setSliceWithProperType sets proper values to slice based on its type. 
-func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { var strs []string if allowShadow { strs = key.StringsWithShadows(delim) @@ -92,26 +92,30 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowSh } var vals interface{} + var err error sliceOf := field.Type().Elem().Kind() switch sliceOf { case reflect.String: vals = strs case reflect.Int: - vals, _ = key.parseInts(strs, true, false) + vals, err = key.parseInts(strs, true, false) case reflect.Int64: - vals, _ = key.parseInt64s(strs, true, false) + vals, err = key.parseInt64s(strs, true, false) case reflect.Uint: - vals, _ = key.parseUints(strs, true, false) + vals, err = key.parseUints(strs, true, false) case reflect.Uint64: - vals, _ = key.parseUint64s(strs, true, false) + vals, err = key.parseUint64s(strs, true, false) case reflect.Float64: - vals, _ = key.parseFloat64s(strs, true, false) + vals, err = key.parseFloat64s(strs, true, false) case reflectTime: - vals, _ = key.parseTimesFormat(time.RFC3339, strs, true, false) + vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) default: return fmt.Errorf("unsupported type '[]%s'", sliceOf) } + if err != nil && isStrict { + return err + } slice := reflect.MakeSlice(field.Type(), numVals, numVals) for i := 0; i < numVals; i++ { @@ -136,10 +140,17 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowSh return nil } +func wrapStrictError(err error, isStrict bool) error { + if isStrict { + return err + } + return nil +} + // setWithProperType sets proper value to field based on its type, // but it does not return error for failing parsing, // because we want to use default value that is already assigned to strcut. 
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { +func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { switch t.Kind() { case reflect.String: if len(key.String()) == 0 { @@ -149,7 +160,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri case reflect.Bool: boolVal, err := key.Bool() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.SetBool(boolVal) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: @@ -161,8 +172,8 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri } intVal, err := key.Int64() - if err != nil || intVal == 0 { - return nil + if err != nil { + return wrapStrictError(err, isStrict) } field.SetInt(intVal) // byte is an alias for uint8, so supporting uint8 breaks support for byte @@ -176,24 +187,24 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri uintVal, err := key.Uint64() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.SetUint(uintVal) case reflect.Float32, reflect.Float64: floatVal, err := key.Float64() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.SetFloat(floatVal) case reflectTime: timeVal, err := key.Time() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.Set(reflect.ValueOf(timeVal)) case reflect.Slice: - return setSliceWithProperType(key, field, delim, allowShadow) + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) default: return fmt.Errorf("unsupported type '%s'", t) } @@ -212,7 +223,7 @@ func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bo return rawName, omitEmpty, allowShadow } -func (s *Section) mapTo(val reflect.Value) error { +func (s *Section) mapTo(val reflect.Value, isStrict bool) error { if val.Kind() == reflect.Ptr { val = val.Elem() } @@ -241,7 +252,7 @@ func (s *Section) mapTo(val reflect.Value) error { if isAnonymous || isStruct { if sec, err := s.f.GetSection(fieldName); err == nil { - if err = sec.mapTo(field); err != nil { + if err = sec.mapTo(field, isStrict); err != nil { return fmt.Errorf("error mapping field(%s): %v", fieldName, err) } continue @@ -250,7 +261,7 @@ func (s *Section) mapTo(val reflect.Value) error { if key, err := s.GetKey(fieldName); err == nil { delim := parseDelim(tpField.Tag.Get("delim")) - if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { return fmt.Errorf("error mapping field(%s): %v", fieldName, err) } } @@ -269,7 +280,22 @@ func (s *Section) MapTo(v interface{}) error { return errors.New("cannot map to non-pointer struct") } - return s.mapTo(val) + return s.mapTo(val, false) +} + +// MapTo maps section to given struct in strict mode, +// which returns all possible error including value parsing error. +func (s *Section) StrictMapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val, true) } // MapTo maps file to given struct. 
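// Illustrative sketch (assumption, not part of the vendored change): the
// Section.StrictMapTo added just above reports value-parsing errors that the
// non-strict MapTo silently swallows. Section and key names are made up.
package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

type limits struct {
	MaxConns int `ini:"max_conns"`
}

func main() {
	cfg, err := ini.Load([]byte("[server]\nmax_conns = many\n"))
	if err != nil {
		panic(err)
	}

	var l limits
	// MapTo would leave MaxConns at 0; StrictMapTo returns the parse error
	// for the non-numeric "max_conns" value.
	if err := cfg.Section("server").StrictMapTo(&l); err != nil {
		fmt.Println(err)
	}
}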
@@ -277,6 +303,12 @@ func (f *File) MapTo(v interface{}) error { return f.Section("").MapTo(v) } +// MapTo maps file to given struct in strict mode, +// which returns all possible error including value parsing error. +func (f *File) StrictMapTo(v interface{}) error { + return f.Section("").StrictMapTo(v) +} + // MapTo maps data sources to given struct with name mapper. func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { cfg, err := Load(source, others...) @@ -287,11 +319,28 @@ func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, other return cfg.MapTo(v) } +// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, +// which returns all possible error including value parsing error. +func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.StrictMapTo(v) +} + // MapTo maps data sources to given struct. func MapTo(v, source interface{}, others ...interface{}) error { return MapToWithMapper(v, nil, source, others...) } +// StrictMapTo maps data sources to given struct in strict mode, +// which returns all possible error including value parsing error. +func StrictMapTo(v, source interface{}, others ...interface{}) error { + return StrictMapToWithMapper(v, nil, source, others...) +} + // reflectSliceWithProperType does the opposite thing as setSliceWithProperType. func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { slice := field.Slice(0, field.Len()) @@ -359,10 +408,11 @@ func isEmptyValue(v reflect.Value) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflectTime: - return v.Interface().(time.Time).IsZero() case reflect.Interface, reflect.Ptr: return v.IsNil() + case reflectTime: + t, ok := v.Interface().(time.Time) + return ok && t.IsZero() } return false } @@ -400,6 +450,12 @@ func (s *Section) reflectFrom(val reflect.Value) error { // Note: fieldName can never be empty here, ignore error. sec, _ = s.f.NewSection(fieldName) } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + if err = sec.reflectFrom(field); err != nil { return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } @@ -411,6 +467,12 @@ func (s *Section) reflectFrom(val reflect.Value) error { if err != nil { key, _ = s.NewKey(fieldName, "") } + + // Add comment from comment tag + if len(key.Comment) == 0 { + key.Comment = tpField.Tag.Get("comment") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) }
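// Illustrative sketch (assumption, not part of the vendored change): two more
// gopkg.in/ini.v1 additions picked up by this bump: the
// UnescapeValueDoubleQuotes load option and the `comment` struct tag now
// honoured when reflecting a struct into a file. Names are made up.
package main

import (
	"fmt"
	"os"

	ini "gopkg.in/ini.v1"
)

type app struct {
	Name string `ini:"name" comment:"Display name shown in the UI"`
}

func main() {
	// Per the new option's doc: key="a \"value\"" is read back as: a "value"
	cfg, err := ini.LoadSources(ini.LoadOptions{UnescapeValueDoubleQuotes: true},
		[]byte("name = \"a \\\"value\\\"\"\n"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("name = %s\n", cfg.Section("").Key("name").String())

	// ReflectFrom now copies the comment tag onto the generated key, so the
	// rendered file carries "; Display name shown in the UI" above "name".
	out := ini.Empty()
	if err := ini.ReflectFrom(out, &app{Name: "demo"}); err != nil {
		panic(err)
	}
	out.WriteTo(os.Stdout)
}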