Mirror of https://github.com/openfaas/faas.git (synced 2025-06-22 06:43:23 +00:00)

Merge master into breakout_swarm
Signed-off-by: Alex Ellis <alexellis2@gmail.com>
gateway/vendor/github.com/nats-io/go-nats-streaming/.gitignore (generated, vendored, new file, 28 lines)

@@ -0,0 +1,28 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

# Eclipse stuff
.project
.settings/
gateway/vendor/github.com/nats-io/go-nats-streaming/.travis.yml (generated, vendored, new file, 22 lines)

@@ -0,0 +1,22 @@
language: go
sudo: false
go:
- 1.6.4
- 1.7.4
install:
- go get -t ./...
- go get github.com/nats-io/nats-streaming-server
- go get github.com/mattn/goveralls
- go get github.com/wadey/gocovmerge
- go get honnef.co/go/staticcheck/cmd/staticcheck
script:
- go fmt ./...
- go vet ./...
- go test -i -race ./...
- go test -v -race ./...
- staticcheck ./...
after_success:
- if [ "$TRAVIS_GO_VERSION" \> "1.7." ]; then ./scripts/cov.sh TRAVIS; fi
env:
  global:
    secure: OoCemKSHHH/SkkamHLWd0qh9qgQDx4/3fGuykYuzW/gjUhLlL0ThyUXOr3HOandoh3wTU8Ntj184WU6Sjh1oXzdDAYcI/ryNQXSmJ/DyGC6ffoj4Je/Rwj3sbwpaFTl1imawL8Lv6+5Dkb2JSbbbqapjbO3BhrrNfqLuQulqrLJKVaOyS5nOByiGFYsgjf/ac7Qrr9AnHhlkWRXoR+q8GlGG7qcKtLlmG5OqxifqfgQ+pcVtyeleT6zGPI0LUyr9gWHRZtMK9nYfxXuQK2d7V+SW4NBW1jdDKBHZbeJRxZ8N8rU8Nk3ka54YHXC2PeD8EloiAr5HkALuHbIdzyy40Y3rJyHfxyY6EYBcZEy+ZCRoqkVJ4NN4R46YE588BpYhT48YHK+lptM7YxrPtf08X+Cugc206X0hk/YFqqsaaNIwMfiTPbapuHxa8S4kgT2vDn3OTI53ZTrDiLVY3ZDp+EdUO1hiYFR6cpu5el/EQN5G0iW6sI69gOv26UmGI369D3fezbYPFPHHDao8xq7s8HdYUZleDNL0oCWK1MgL2g/Irbt5Kr6JjT/tpQOiiagqeR5dlV9mAiOZFr88gg7aqwOuSqmlULWVB4qYncQ6IBoednIHtrLW6H+2RfrZU01cI6tGSrXD+VoFnQ7aZwLxLc71VyN5khYPk0gGvyQhZxk=
gateway/vendor/github.com/nats-io/go-nats-streaming/README.md (generated, vendored, 11 lines changed)

@@ -2,7 +2,7 @@

NATS Streaming is an extremely performant, lightweight reliable streaming platform powered by [NATS](https://nats.io).

[License MIT](http://opensource.org/licenses/MIT)
[Build Status](http://travis-ci.org/nats-io/go-nats-streaming)
[Coverage Status](https://coveralls.io/r/nats-io/go-nats-streaming?branch=master)

@@ -17,6 +17,9 @@ NATS Streaming provides the following high-level feature set:

- Please raise questions/issues via the [Issue Tracker](https://github.com/nats-io/go-nats-streaming/issues).

## Known Issues
- Time- and sequence-based subscriptions are exact. Requesting a time or seqno before the earliest stored message for a subject will result in an error (in SubscriptionRequest.Error)

## Installation

```bash

@@ -181,7 +184,7 @@ that is, the start position will take effect and delivery will start from there.

### Durable Queue Groups

As described above, for non durable queue subscribers, when the last member leaves the group,
As described above, for non durable queue subsribers, when the last member leaves the group,
that group is removed. A durable queue group allows you to have all members leave but still
maintain state. When a member re-joins, it starts at the last position in that group.

@@ -217,7 +220,7 @@ The rules for non-durable queue subscribers apply to durable subscribers.

As for non-durable queue subscribers, if a member's connection is closed, or if
`Unsubscribe` its called, the member leaves the group. Any unacknowledged message
is transferred to remaining members. See *Closing the Group* for important difference
is transfered to remaining members. See *Closing the Group* for important difference
with non-durable queue subscribers.

#### Closing the Group

@@ -298,7 +301,7 @@ ah := func(nuid string, err error) {
}

for i := 1; i < 1000; i++ {
    // If the server is unable to keep up with the publisher, the number of outstanding acks will eventually
    // If the server is unable to keep up with the publisher, the number of oustanding acks will eventually
    // reach the max and this call will block
    guid, _ := sc.PublishAsync("foo", []byte("Hello World"), ah)
}
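The two behaviours touched by these README hunks, durable queue groups and asynchronous publishing with a bounded number of outstanding acks, are easiest to see together in a small client. The sketch below is assembled only from calls that appear elsewhere in this diff (stan.Connect, QueueSubscribe, DurableName, DeliverAllAvailable, PublishAsync); the cluster ID, client ID, subject and queue-group names are placeholders, not values from the commit.

```go
package main

import (
    "log"
    "time"

    stan "github.com/nats-io/go-nats-streaming"
)

func main() {
    // Placeholder IDs; "test-cluster" matches the examples vendored in this commit.
    sc, err := stan.Connect("test-cluster", "readme-sketch")
    if err != nil {
        log.Fatal(err)
    }
    defer sc.Close()

    // Durable queue group: all members may leave and the group keeps its state;
    // a member that re-joins resumes from the last position recorded for the group.
    _, err = sc.QueueSubscribe("foo", "workers", func(m *stan.Msg) {
        log.Printf("received seq=%d: %s", m.Sequence, string(m.Data))
    }, stan.DurableName("workers-durable"), stan.DeliverAllAvailable())
    if err != nil {
        log.Fatal(err)
    }

    // Async publish: the ack handler reports whether the server stored the message;
    // the number of unacknowledged publishes in flight is capped by the connection.
    ah := func(guid string, err error) {
        if err != nil {
            log.Printf("publish %s failed: %v", guid, err)
        }
    }
    if _, err := sc.PublishAsync("foo", []byte("Hello World"), ah); err != nil {
        log.Fatal(err)
    }

    time.Sleep(time.Second) // give the sketch a moment to receive the message
}
```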
gateway/vendor/github.com/nats-io/go-nats-streaming/TODO.md (generated, vendored, new file, 15 lines)

@@ -0,0 +1,15 @@

- [ ] Retry limits?
- [ ] Server Store Limits (time, msgs, byte)
- [X] Change time to deltas
- [X] Server heartbeat, release dead clients.
- [X] Require clientID for published messages, error if not registered.
- [X] Check for need of ackMap (out of order re-delivery to queue subscribers).
- [X] Redelivered Flag for Msg.
- [X] Queue Subscribers
- [X] Durable Subscribers (survive reconnect, etc)
- [X] Start Positions on Subscribers
- [X] Ack for delivered just Reply? No need on ConnectedResponse?
- [X] PublishWithReply, or option.
- [X] Data Races in Server.
- [X] Manual Ack?
gateway/vendor/github.com/nats-io/go-nats-streaming/benchmark_test.go (generated, vendored, new file, 245 lines)

@@ -0,0 +1,245 @@
package stan

////////////////////////////////////////////////////////////////////////////////
// Benchmarks
////////////////////////////////////////////////////////////////////////////////

import (
    "fmt"
    "sync/atomic"
    "testing"
    "time"
)

func BenchmarkPublish(b *testing.B) {
    b.StopTimer()

    // Run a NATS Streaming server
    s := RunServer(clusterName)
    defer s.Shutdown()
    sc := NewDefaultConnection(b)
    defer sc.Close()

    hw := []byte("Hello World")

    b.StartTimer()
    b.ReportAllocs()

    for i := 0; i < b.N; i++ {
        if err := sc.Publish("foo", hw); err != nil {
            b.Fatalf("Got error on publish: %v\n", err)
        }
    }
}

func BenchmarkPublishAsync(b *testing.B) {
    b.StopTimer()

    // Run a NATS Streaming server
    s := RunServer(clusterName)
    defer s.Shutdown()
    sc := NewDefaultConnection(b)
    defer sc.Close()

    hw := []byte("Hello World")

    ch := make(chan bool)
    received := int32(0)

    ah := func(guid string, err error) {
        if err != nil {
            b.Fatalf("Received an error in ack callback: %v\n", err)
        }
        if nr := atomic.AddInt32(&received, 1); nr >= int32(b.N) {
            ch <- true
        }
    }
    b.StartTimer()
    b.ReportAllocs()

    for i := 0; i < b.N; i++ {
        if _, err := sc.PublishAsync("foo", hw, ah); err != nil {
            //fmt.Printf("Client status %v, Server status %v\n", s.nc.Status(), (sc.(*conn)).nc.Status())
            fmt.Printf("len(ackmap) = %d\n", len(sc.(*conn).pubAckMap))

            b.Fatalf("Error from PublishAsync: %v\n", err)
        }
    }

    err := WaitTime(ch, 10*time.Second)
    if err != nil {
        fmt.Printf("sc error is %v\n", sc.(*conn).nc.LastError())
        b.Fatal("Timed out waiting for ack messages")
    } else if atomic.LoadInt32(&received) != int32(b.N) {
        b.Fatalf("Received: %d", received)
    }

    // msgs, bytes, _ := sc.(*conn).ackSubscription.MaxPending()
    // fmt.Printf("max pending msgs:%d bytes:%d\n", msgs, bytes)
}

func BenchmarkSubscribe(b *testing.B) {
    b.StopTimer()

    // Run a NATS Streaming server
    s := RunServer(clusterName)
    defer s.Shutdown()
    sc := NewDefaultConnection(b)
    defer sc.Close()

    hw := []byte("Hello World")
    pch := make(chan bool)

    // Queue up all the messages. Keep this outside of the timing.
    for i := 0; i < b.N; i++ {
        if i == b.N-1 {
            // last one
            sc.PublishAsync("foo", hw, func(lguid string, err error) {
                if err != nil {
                    b.Fatalf("Got an error from ack handler, %v", err)
                }
                pch <- true
            })
        } else {
            sc.PublishAsync("foo", hw, nil)
        }
    }

    // Wait for published to finish
    if err := WaitTime(pch, 10*time.Second); err != nil {
        b.Fatalf("Error waiting for publish to finish\n")
    }

    ch := make(chan bool)
    received := int32(0)

    b.StartTimer()
    b.ReportAllocs()

    sc.Subscribe("foo", func(m *Msg) {
        if nr := atomic.AddInt32(&received, 1); nr >= int32(b.N) {
            ch <- true
        }
    }, DeliverAllAvailable())

    err := WaitTime(ch, 10*time.Second)
    nr := atomic.LoadInt32(&received)
    if err != nil {
        b.Fatalf("Timed out waiting for messages, received only %d of %d\n", nr, b.N)
    } else if nr != int32(b.N) {
        b.Fatalf("Only Received: %d of %d", received, b.N)
    }
}

func BenchmarkQueueSubscribe(b *testing.B) {
    b.StopTimer()

    // Run a NATS Streaming server
    s := RunServer(clusterName)
    defer s.Shutdown()
    sc := NewDefaultConnection(b)
    defer sc.Close()

    hw := []byte("Hello World")
    pch := make(chan bool)

    // Queue up all the messages. Keep this outside of the timing.
    for i := 0; i < b.N; i++ {
        if i == b.N-1 {
            // last one
            sc.PublishAsync("foo", hw, func(lguid string, err error) {
                if err != nil {
                    b.Fatalf("Got an error from ack handler, %v", err)
                }
                pch <- true
            })
        } else {
            sc.PublishAsync("foo", hw, nil)
        }
    }

    // Wait for published to finish
    if err := WaitTime(pch, 10*time.Second); err != nil {
        b.Fatalf("Error waiting for publish to finish\n")
    }

    ch := make(chan bool)
    received := int32(0)

    b.StartTimer()
    b.ReportAllocs()

    mcb := func(m *Msg) {
        if nr := atomic.AddInt32(&received, 1); nr >= int32(b.N) {
            ch <- true
        }
    }

    sc.QueueSubscribe("foo", "bar", mcb, DeliverAllAvailable())
    sc.QueueSubscribe("foo", "bar", mcb, DeliverAllAvailable())
    sc.QueueSubscribe("foo", "bar", mcb, DeliverAllAvailable())
    sc.QueueSubscribe("foo", "bar", mcb, DeliverAllAvailable())

    err := WaitTime(ch, 20*time.Second)
    nr := atomic.LoadInt32(&received)
    if err != nil {
        b.Fatalf("Timed out waiting for messages, received only %d of %d\n", nr, b.N)
    } else if nr != int32(b.N) {
        b.Fatalf("Only Received: %d of %d", received, b.N)
    }
}

func BenchmarkPublishSubscribe(b *testing.B) {
    b.StopTimer()

    // Run a NATS Streaming server
    s := RunServer(clusterName)
    defer s.Shutdown()
    sc := NewDefaultConnection(b)
    defer sc.Close()

    hw := []byte("Hello World")

    ch := make(chan bool)
    received := int32(0)

    // Subscribe callback, counts msgs received.
    _, err := sc.Subscribe("foo", func(m *Msg) {
        if nr := atomic.AddInt32(&received, 1); nr >= int32(b.N) {
            ch <- true
        }
    }, DeliverAllAvailable())

    if err != nil {
        b.Fatalf("Error subscribing, %v", err)
    }

    b.StartTimer()
    b.ReportAllocs()

    for i := 0; i < b.N; i++ {
        _, err := sc.PublishAsync("foo", hw, func(guid string, err error) {
            if err != nil {
                b.Fatalf("Received an error in publish ack callback: %v\n", err)
            }
        })
        if err != nil {
            b.Fatalf("Error publishing %v\n", err)
        }
    }

    err = WaitTime(ch, 30*time.Second)
    nr := atomic.LoadInt32(&received)
    if err != nil {
        b.Fatalf("Timed out waiting for messages, received only %d of %d\n", nr, b.N)
    } else if nr != int32(b.N) {
        b.Fatalf("Only Received: %d of %d", received, b.N)
    }
}

func BenchmarkTimeNow(b *testing.B) {
    for i := 0; i < b.N; i++ {
        now := time.Now()
        now.Add(10 * time.Nanosecond)
    }
}
gateway/vendor/github.com/nats-io/go-nats-streaming/examples/stan-bench.go (generated, vendored, new file, 194 lines)

@@ -0,0 +1,194 @@
// Copyright 2015 Apcera Inc. All rights reserved.

package main

import (
    "flag"
    "fmt"
    "io/ioutil"
    "log"
    "strings"
    "sync"
    "time"

    "github.com/nats-io/go-nats"
    "github.com/nats-io/go-nats-streaming"
    "github.com/nats-io/go-nats/bench"
)

// Some sane defaults
const (
    DefaultNumMsgs            = 100000
    DefaultNumPubs            = 1
    DefaultNumSubs            = 0
    DefaultAsync              = false
    DefaultMessageSize        = 128
    DefaultIgnoreOld          = false
    DefaultMaxPubAcksInflight = 1000
    DefaultClientID           = "benchmark"
)

func usage() {
    log.Fatalf("Usage: nats-bench [-s server (%s)] [--tls] [-id CLIENT_ID] [-np NUM_PUBLISHERS] [-ns NUM_SUBSCRIBERS] [-n NUM_MSGS] [-ms MESSAGE_SIZE] [-csv csvfile] [-mpa MAX_NUMBER_OF_PUBLISHED_ACKS_INFLIGHT] [-io] [-a] <subject>\n", nats.DefaultURL)
}

var benchmark *bench.Benchmark

func main() {
    var urls = flag.String("s", nats.DefaultURL, "The NATS server URLs (separated by comma")
    var tls = flag.Bool("tls", false, "Use TLS secure sonnection")
    var numPubs = flag.Int("np", DefaultNumPubs, "Number of concurrent publishers")
    var numSubs = flag.Int("ns", DefaultNumSubs, "Number of concurrent subscribers")
    var numMsgs = flag.Int("n", DefaultNumMsgs, "Number of messages to publish")
    var async = flag.Bool("a", DefaultAsync, "Async message publishing")
    var messageSize = flag.Int("ms", DefaultMessageSize, "Message size in bytes.")
    var ignoreOld = flag.Bool("io", DefaultIgnoreOld, "Subscribers ignore old messages")
    var maxPubAcks = flag.Int("mpa", DefaultMaxPubAcksInflight, "Max number of published acks in flight")
    var clientID = flag.String("id", DefaultClientID, "Benchmark process base client ID")
    var csvFile = flag.String("csv", "", "Save bench data to csv file")

    log.SetFlags(0)
    flag.Usage = usage
    flag.Parse()

    args := flag.Args()
    if len(args) != 1 {
        usage()
    }

    // Setup the option block
    opts := nats.DefaultOptions
    opts.Servers = strings.Split(*urls, ",")
    for i, s := range opts.Servers {
        opts.Servers[i] = strings.Trim(s, " ")
    }

    opts.Secure = *tls

    benchmark = bench.NewBenchmark("NATS Streaming", *numSubs, *numPubs)

    var startwg sync.WaitGroup
    var donewg sync.WaitGroup

    donewg.Add(*numPubs + *numSubs)

    // Run Subscribers first
    startwg.Add(*numSubs)
    for i := 0; i < *numSubs; i++ {
        subID := fmt.Sprintf("%s-sub-%d", *clientID, i)
        go runSubscriber(&startwg, &donewg, opts, *numMsgs, *messageSize, *ignoreOld, subID)
    }
    startwg.Wait()

    // Now Publishers
    startwg.Add(*numPubs)
    pubCounts := bench.MsgsPerClient(*numMsgs, *numPubs)
    for i := 0; i < *numPubs; i++ {
        pubID := fmt.Sprintf("%s-pub-%d", *clientID, i)
        go runPublisher(&startwg, &donewg, opts, pubCounts[i], *messageSize, *async, pubID, *maxPubAcks)
    }

    log.Printf("Starting benchmark [msgs=%d, msgsize=%d, pubs=%d, subs=%d]\n", *numMsgs, *messageSize, *numPubs, *numSubs)

    startwg.Wait()
    donewg.Wait()

    benchmark.Close()
    fmt.Print(benchmark.Report())

    if len(*csvFile) > 0 {
        csv := benchmark.CSV()
        ioutil.WriteFile(*csvFile, []byte(csv), 0644)
        fmt.Printf("Saved metric data in csv file %s\n", *csvFile)
    }
}

func runPublisher(startwg, donewg *sync.WaitGroup, opts nats.Options, numMsgs int, msgSize int, async bool, pubID string, maxPubAcksInflight int) {
    nc, err := opts.Connect()
    if err != nil {
        log.Fatalf("Publisher %s can't connect: %v\n", pubID, err)
    }
    snc, err := stan.Connect("test-cluster", pubID, stan.MaxPubAcksInflight(maxPubAcksInflight), stan.NatsConn(nc))
    if err != nil {
        log.Fatalf("Publisher %s can't connect: %v\n", pubID, err)
    }

    startwg.Done()

    args := flag.Args()

    subj := args[0]
    var msg []byte
    if msgSize > 0 {
        msg = make([]byte, msgSize)
    }
    published := 0
    start := time.Now()

    if async {
        ch := make(chan bool)
        acb := func(lguid string, err error) {
            published++
            if published >= numMsgs {
                ch <- true
            }
        }
        for i := 0; i < numMsgs; i++ {
            _, err := snc.PublishAsync(subj, msg, acb)
            if err != nil {
                log.Fatal(err)
            }
        }
        <-ch
    } else {
        for i := 0; i < numMsgs; i++ {
            err := snc.Publish(subj, msg)
            if err != nil {
                log.Fatal(err)
            }
            published++
        }
    }

    benchmark.AddPubSample(bench.NewSample(numMsgs, msgSize, start, time.Now(), snc.NatsConn()))
    snc.Close()
    nc.Close()
    donewg.Done()
}

func runSubscriber(startwg, donewg *sync.WaitGroup, opts nats.Options, numMsgs int, msgSize int, ignoreOld bool, subID string) {
    nc, err := opts.Connect()
    if err != nil {
        log.Fatalf("Subscriber %s can't connect: %v\n", subID, err)
    }
    snc, err := stan.Connect("test-cluster", subID, stan.NatsConn(nc))
    if err != nil {
        log.Fatalf("Subscriber %s can't connect: %v\n", subID, err)
    }

    args := flag.Args()
    subj := args[0]
    ch := make(chan bool)
    start := time.Now()

    received := 0
    mcb := func(msg *stan.Msg) {
        received++
        if received >= numMsgs {
            ch <- true
        }
    }

    if ignoreOld {
        snc.Subscribe(subj, mcb)
    } else {
        snc.Subscribe(subj, mcb, stan.DeliverAllAvailable())
    }
    startwg.Done()

    <-ch
    benchmark.AddSubSample(bench.NewSample(numMsgs, msgSize, start, time.Now(), snc.NatsConn()))
    snc.Close()
    nc.Close()
    donewg.Done()
}
gateway/vendor/github.com/nats-io/go-nats-streaming/examples/stan-pub.go (generated, vendored, new file, 108 lines)

@@ -0,0 +1,108 @@
// Copyright 2012-2016 Apcera Inc. All rights reserved.
// +build ignore

package main

import (
    "flag"
    "fmt"
    "log"
    "os"
    "sync"
    "time"

    "github.com/nats-io/go-nats-streaming"
)

var usageStr = `
Usage: stan-pub [options] <subject> <message>

Options:
    -s, --server <url>            NATS Streaming server URL(s)
    -c, --cluster <cluster name>  NATS Streaming cluster name
    -id,--clientid <client ID>    NATS Streaming client ID
    -a, --async                   Asynchronous publish mode
`

// NOTE: Use tls scheme for TLS, e.g. stan-pub -s tls://demo.nats.io:4443 foo hello
func usage() {
    fmt.Printf("%s\n", usageStr)
    os.Exit(0)
}

func main() {
    var clusterID string
    var clientID string
    var async bool
    var URL string

    flag.StringVar(&URL, "s", stan.DefaultNatsURL, "The nats server URLs (separated by comma)")
    flag.StringVar(&URL, "server", stan.DefaultNatsURL, "The nats server URLs (separated by comma)")
    flag.StringVar(&clusterID, "c", "test-cluster", "The NATS Streaming cluster ID")
    flag.StringVar(&clusterID, "cluster", "test-cluster", "The NATS Streaming cluster ID")
    flag.StringVar(&clientID, "id", "stan-pub", "The NATS Streaming client ID to connect with")
    flag.StringVar(&clientID, "clientid", "stan-pub", "The NATS Streaming client ID to connect with")
    flag.BoolVar(&async, "a", false, "Publish asynchronously")
    flag.BoolVar(&async, "async", false, "Publish asynchronously")

    log.SetFlags(0)
    flag.Usage = usage
    flag.Parse()

    args := flag.Args()

    if len(args) < 1 {
        usage()
    }

    sc, err := stan.Connect(clusterID, clientID, stan.NatsURL(URL))
    if err != nil {
        log.Fatalf("Can't connect: %v.\nMake sure a NATS Streaming Server is running at: %s", err, URL)
    }
    defer sc.Close()

    subj, msg := args[0], []byte(args[1])

    ch := make(chan bool)
    var glock sync.Mutex
    var guid string
    acb := func(lguid string, err error) {
        glock.Lock()
        log.Printf("Received ACK for guid %s\n", lguid)
        defer glock.Unlock()
        if err != nil {
            log.Fatalf("Error in server ack for guid %s: %v\n", lguid, err)
        }
        if lguid != guid {
            log.Fatalf("Expected a matching guid in ack callback, got %s vs %s\n", lguid, guid)
        }
        ch <- true
    }

    if async != true {
        err = sc.Publish(subj, msg)
        if err != nil {
            log.Fatalf("Error during publish: %v\n", err)
        }
        log.Printf("Published [%s] : '%s'\n", subj, msg)
    } else {
        glock.Lock()
        guid, err = sc.PublishAsync(subj, msg, acb)
        if err != nil {
            log.Fatalf("Error during async publish: %v\n", err)
        }
        glock.Unlock()
        if guid == "" {
            log.Fatal("Expected non-empty guid to be returned.")
        }
        log.Printf("Published [%s] : '%s' [guid: %s]\n", subj, msg, guid)

        select {
        case <-ch:
            break
        case <-time.After(5 * time.Second):
            log.Fatal("timeout")
        }

    }
}
gateway/vendor/github.com/nats-io/go-nats-streaming/examples/stan-sub.go (generated, vendored, new file, 152 lines)

@@ -0,0 +1,152 @@
// Copyright 2012-2016 Apcera Inc. All rights reserved.
// +build ignore

package main

import (
    "flag"
    "fmt"
    "log"
    "os"
    "os/signal"
    "time"

    "github.com/nats-io/go-nats-streaming"
    "github.com/nats-io/go-nats-streaming/pb"
)

var usageStr = `
Usage: stan-sub [options] <subject>

Options:
    -s, --server <url>            NATS Streaming server URL(s)
    -c, --cluster <cluster name>  NATS Streaming cluster name
    -id,--clientid <client ID>    NATS Streaming client ID

Subscription Options:
    --qgroup <name>               Queue group
    --seq <seqno>                 Start at seqno
    --all                         Deliver all available messages
    --last                        Deliver starting with last published message
    --since <duration>            Deliver messages in last interval (e.g. 1s, 1hr)
                                  (for more information: https://golang.org/pkg/time/#ParseDuration)
    --durable <name>              Durable subscriber name
    --unsubscribe                 Unsubscribe the durable on exit
`

// NOTE: Use tls scheme for TLS, e.g. stan-sub -s tls://demo.nats.io:4443 foo
func usage() {
    log.Fatalf(usageStr)
}

func printMsg(m *stan.Msg, i int) {
    log.Printf("[#%d] Received on [%s]: '%s'\n", i, m.Subject, m)
}

func main() {
    var clusterID string
    var clientID string
    var showTime bool
    var startSeq uint64
    var startDelta string
    var deliverAll bool
    var deliverLast bool
    var durable string
    var qgroup string
    var unsubscribe bool
    var URL string

    // defaultID := fmt.Sprintf("client.%s", nuid.Next())

    flag.StringVar(&URL, "s", stan.DefaultNatsURL, "The nats server URLs (separated by comma)")
    flag.StringVar(&URL, "server", stan.DefaultNatsURL, "The nats server URLs (separated by comma)")
    flag.StringVar(&clusterID, "c", "test-cluster", "The NATS Streaming cluster ID")
    flag.StringVar(&clusterID, "cluster", "test-cluster", "The NATS Streaming cluster ID")
    flag.StringVar(&clientID, "id", "", "The NATS Streaming client ID to connect with")
    flag.StringVar(&clientID, "clientid", "", "The NATS Streaming client ID to connect with")
    flag.BoolVar(&showTime, "t", false, "Display timestamps")
    // Subscription options
    flag.Uint64Var(&startSeq, "seq", 0, "Start at sequence no.")
    flag.BoolVar(&deliverAll, "all", false, "Deliver all")
    flag.BoolVar(&deliverLast, "last", false, "Start with last value")
    flag.StringVar(&startDelta, "since", "", "Deliver messages since specified time offset")
    flag.StringVar(&durable, "durable", "", "Durable subscriber name")
    flag.StringVar(&qgroup, "qgroup", "", "Queue group name")
    flag.BoolVar(&unsubscribe, "unsubscribe", false, "Unsubscribe the durable on exit")

    log.SetFlags(0)
    flag.Usage = usage
    flag.Parse()

    args := flag.Args()

    if clientID == "" {
        log.Printf("Error: A unique client ID must be specified.")
        usage()
    }
    if len(args) < 1 {
        log.Printf("Error: A subject must be specified.")
        usage()
    }

    sc, err := stan.Connect(clusterID, clientID, stan.NatsURL(URL))
    if err != nil {
        log.Fatalf("Can't connect: %v.\nMake sure a NATS Streaming Server is running at: %s", err, URL)
    }
    log.Printf("Connected to %s clusterID: [%s] clientID: [%s]\n", URL, clusterID, clientID)

    subj, i := args[0], 0

    mcb := func(msg *stan.Msg) {
        i++
        printMsg(msg, i)
    }

    startOpt := stan.StartAt(pb.StartPosition_NewOnly)

    if startSeq != 0 {
        startOpt = stan.StartAtSequence(startSeq)
    } else if deliverLast == true {
        startOpt = stan.StartWithLastReceived()
    } else if deliverAll == true {
        log.Print("subscribing with DeliverAllAvailable")
        startOpt = stan.DeliverAllAvailable()
    } else if startDelta != "" {
        ago, err := time.ParseDuration(startDelta)
        if err != nil {
            sc.Close()
            log.Fatal(err)
        }
        startOpt = stan.StartAtTimeDelta(ago)
    }

    sub, err := sc.QueueSubscribe(subj, qgroup, mcb, startOpt, stan.DurableName(durable))
    if err != nil {
        sc.Close()
        log.Fatal(err)
    }

    log.Printf("Listening on [%s], clientID=[%s], qgroup=[%s] durable=[%s]\n", subj, clientID, qgroup, durable)

    if showTime {
        log.SetFlags(log.LstdFlags)
    }

    // Wait for a SIGINT (perhaps triggered by user with CTRL-C)
    // Run cleanup when signal is received
    signalChan := make(chan os.Signal, 1)
    cleanupDone := make(chan bool)
    signal.Notify(signalChan, os.Interrupt)
    go func() {
        for _ = range signalChan {
            fmt.Printf("\nReceived an interrupt, unsubscribing and closing connection...\n\n")
            // Do not unsubscribe a durable on exit, except if asked to.
            if durable == "" || unsubscribe {
                sub.Unsubscribe()
            }
            sc.Close()
            cleanupDone <- true
        }
    }()
    <-cleanupDone
}
gateway/vendor/github.com/nats-io/go-nats-streaming/scripts/cov.sh (generated, vendored, new executable file, 16 lines)

@@ -0,0 +1,16 @@
#!/bin/bash -e
# Run from directory above via ./scripts/cov.sh

rm -rf ./cov
mkdir cov
go test -v -covermode=atomic -coverprofile=./cov/stan.out
gocovmerge ./cov/*.out > acc.out
rm -rf ./cov

# If we have an arg, assume travis run and push to coveralls. Otherwise launch browser results
if [[ -n $1 ]]; then
    $HOME/gopath/bin/goveralls -coverprofile=acc.out
    rm -rf ./acc.out
else
    go tool cover -html=acc.out
fi
gateway/vendor/github.com/nats-io/go-nats-streaming/stan.go (generated, vendored, 36 lines changed)

@@ -71,7 +71,7 @@ var (
)

// AckHandler is used for Async Publishing to provide status of the ack.
// The func will be passed the GUID and any error state. No error means the
// The func will be passed teh GUID and any error state. No error means the
// message was successfully received by NATS Streaming.
type AckHandler func(string, error)

@@ -144,6 +144,7 @@ func NatsConn(nc *nats.Conn) Option {
type conn struct {
    sync.RWMutex
    clientID      string
    serverID      string
    pubPrefix     string // Publish prefix set by stan, append our subject.
    subRequests   string // Subject to send subscription requests.
    unsubRequests string // Subject to send unsubscribe requests.

@@ -168,7 +169,6 @@ type ack struct {
}

// Connect will form a connection to the NATS Streaming subsystem.
// Note that clientID can contain only alphanumeric and `-` or `_` characters.
func Connect(stanClusterID, clientID string, options ...Option) (Conn, error) {
    // Process Options
    c := conn{clientID: clientID, opts: DefaultOptions}

@@ -181,7 +181,7 @@ func Connect(stanClusterID, clientID string, options ...Option) (Conn, error) {
    c.nc = c.opts.NatsConn
    // Create a NATS connection if it doesn't exist.
    if c.nc == nil {
        nc, err := nats.Connect(c.opts.NatsURL, nats.Name(clientID))
        nc, err := nats.Connect(c.opts.NatsURL)
        if err != nil {
            return nil, err
        }

@@ -253,6 +253,10 @@ func Connect(stanClusterID, clientID string, options ...Option) (Conn, error) {

// Close a connection to the stan system.
func (sc *conn) Close() error {
    if sc == nil {
        return ErrBadConnection
    }

    sc.Lock()
    defer sc.Unlock()

@@ -318,7 +322,9 @@ func (sc *conn) processAck(m *nats.Msg) {
    pa := &pb.PubAck{}
    err := pa.Unmarshal(m.Data)
    if err != nil {
        panic(fmt.Errorf("Error during ack unmarshal: %v", err))
        // FIXME, make closure to have context?
        fmt.Printf("Error processing unmarshal\n")
        return
    }

    // Remove

@@ -389,16 +395,11 @@ func (sc *conn) publishAsync(subject string, data []byte, ah AckHandler, ch chan
    // Setup the timer for expiration.
    sc.Lock()
    a.t = time.AfterFunc(ackTimeout, func() {
        pubAck := sc.removeAck(peGUID)
        // processAck could get here before and handle the ack.
        // If that's the case, we would get nil here and simply return.
        if pubAck == nil {
            return
        }
        if pubAck.ah != nil {
            pubAck.ah(peGUID, ErrTimeout)
        sc.removeAck(peGUID)
        if a.ah != nil {
            ah(peGUID, ErrTimeout)
        } else if a.ch != nil {
            pubAck.ch <- ErrTimeout
            a.ch <- ErrTimeout
        }
    })
    sc.Unlock()

@@ -435,7 +436,7 @@ func (sc *conn) processMsg(raw *nats.Msg) {
    msg := &Msg{}
    err := msg.Unmarshal(raw.Data)
    if err != nil {
        panic(fmt.Errorf("Error processing unmarshal for msg: %v", err))
        panic("Error processing unmarshal for msg")
    }
    // Lookup the subscription
    sc.RLock()

@@ -464,11 +465,12 @@ func (sc *conn) processMsg(raw *nats.Msg) {
        cb(msg)
    }

    // Process auto-ack
    // Proces auto-ack
    if !isManualAck && nc != nil {
        ack := &pb.Ack{Subject: msg.Subject, Sequence: msg.Sequence}
        b, _ := ack.Marshal()
        // FIXME(dlc) - Async error handler? Retry?
        nc.Publish(ackSubject, b)
        if err := nc.Publish(ackSubject, b); err != nil {
            // FIXME(dlc) - Async error handler? Retry?
        }
    }
}
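The publishAsync hunk above reworks what happens when the server ack does not arrive before ackTimeout: the pending ack is removed and the registered AckHandler (or the caller's channel) is completed with ErrTimeout. From an application's point of view that path surfaces in the AckHandler documented at the top of this file. The sketch below is a minimal illustration of handling it; the helper name, subject and logging are invented for the example and are not part of the library.

```go
package example

import (
    "log"

    stan "github.com/nats-io/go-nats-streaming"
)

// publishWithAckLogging shows where the ErrTimeout set by the ack-expiration
// timer in publishAsync ends up: inside the caller's AckHandler.
func publishWithAckLogging(sc stan.Conn, subject string, data []byte) error {
    _, err := sc.PublishAsync(subject, data, func(guid string, err error) {
        switch {
        case err == stan.ErrTimeout:
            // No ack before the timeout; the caller decides whether to republish.
            log.Printf("no ack for %s before the ack timeout", guid)
        case err != nil:
            log.Printf("publish %s failed: %v", guid, err)
        default:
            log.Printf("publish %s acknowledged", guid)
        }
    })
    return err
}
```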
gateway/vendor/github.com/nats-io/go-nats-streaming/stan_test.go (generated, vendored, new file, 2053 lines)

File diff suppressed because it is too large.
gateway/vendor/github.com/nats-io/go-nats-streaming/sub.go (generated, vendored, 115 lines changed)

@@ -30,14 +30,6 @@ type Msg struct {
// Subscription represents a subscription within the NATS Streaming cluster. Subscriptions
// will be rate matched and follow at-least delivery semantics.
type Subscription interface {
    ClearMaxPending() error
    Delivered() (int64, error)
    Dropped() (int, error)
    IsValid() bool
    MaxPending() (int, int, error)
    Pending() (int, int, error)
    PendingLimits() (int, int, error)
    SetPendingLimits(msgLimit, bytesLimit int) error
    // Unsubscribe removes interest in the subscription.
    // For durables, it means that the durable interest is also removed from
    // the server. Restarting a durable with the same name will not resume

@@ -264,97 +256,12 @@ func (sc *conn) subscribe(subject, qgroup string, cb MsgHandler, options ...Subs
    return sub, nil
}

// ClearMaxPending resets the maximums seen so far.
func (sub *subscription) ClearMaxPending() error {
    sub.Lock()
    defer sub.Unlock()
    if sub.inboxSub == nil {
        return ErrBadSubscription
    }
    return sub.inboxSub.ClearMaxPending()
}

// Delivered returns the number of delivered messages for this subscription.
func (sub *subscription) Delivered() (int64, error) {
    sub.Lock()
    defer sub.Unlock()
    if sub.inboxSub == nil {
        return -1, ErrBadSubscription
    }
    return sub.inboxSub.Delivered()
}

// Dropped returns the number of known dropped messages for this subscription.
// This will correspond to messages dropped by violations of PendingLimits. If
// the server declares the connection a SlowConsumer, this number may not be
// valid.
func (sub *subscription) Dropped() (int, error) {
    sub.Lock()
    defer sub.Unlock()
    if sub.inboxSub == nil {
        return -1, ErrBadSubscription
    }
    return sub.inboxSub.Dropped()
}

// IsValid returns a boolean indicating whether the subscription
// is still active. This will return false if the subscription has
// already been closed.
func (sub *subscription) IsValid() bool {
    sub.Lock()
    defer sub.Unlock()
    if sub.inboxSub == nil {
        return false
    }
    return sub.inboxSub.IsValid()
}

// MaxPending returns the maximum number of queued messages and queued bytes seen so far.
func (sub *subscription) MaxPending() (int, int, error) {
    sub.Lock()
    defer sub.Unlock()
    if sub.inboxSub == nil {
        return -1, -1, ErrBadSubscription
    }
    return sub.inboxSub.MaxPending()
}

// Pending returns the number of queued messages and queued bytes in the client for this subscription.
func (sub *subscription) Pending() (int, int, error) {
    sub.Lock()
    defer sub.Unlock()
    if sub.inboxSub == nil {
        return -1, -1, ErrBadSubscription
    }
    return sub.inboxSub.Pending()
}

// PendingLimits returns the current limits for this subscription.
// If no error is returned, a negative value indicates that the
// given metric is not limited.
func (sub *subscription) PendingLimits() (int, int, error) {
    sub.Lock()
    defer sub.Unlock()
    if sub.inboxSub == nil {
        return -1, -1, ErrBadSubscription
    }
    return sub.inboxSub.PendingLimits()
}

// SetPendingLimits sets the limits for pending msgs and bytes for this subscription.
// Zero is not allowed. Any negative value means that the given metric is not limited.
func (sub *subscription) SetPendingLimits(msgLimit, bytesLimit int) error {
    sub.Lock()
    defer sub.Unlock()
    if sub.inboxSub == nil {
        return ErrBadSubscription
    }
    return sub.inboxSub.SetPendingLimits(msgLimit, bytesLimit)
}

// closeOrUnsubscribe performs either close or unsubsribe based on
// given boolean.
func (sub *subscription) closeOrUnsubscribe(doClose bool) error {
    if sub == nil {
        return ErrBadSubscription
    }
    sub.Lock()
    sc := sub.sc
    if sc == nil {

@@ -367,6 +274,10 @@ func (sub *subscription) closeOrUnsubscribe(doClose bool) error {
    sub.inboxSub = nil
    sub.Unlock()

    if sc == nil {
        return ErrBadSubscription
    }

    sc.Lock()
    if sc.nc == nil {
        sc.Unlock()

@@ -431,8 +342,12 @@ func (msg *Msg) Ack() error {
    if msg == nil {
        return ErrNilMsg
    }
    // Look up subscription (cannot be nil)
    // Look up subscription
    sub := msg.Sub.(*subscription)
    if sub == nil {
        return ErrBadSubscription
    }

    sub.RLock()
    ackSubject := sub.ackInbox
    isManualAck := sub.opts.ManualAcks

@@ -440,9 +355,6 @@ func (msg *Msg) Ack() error {
    sub.RUnlock()

    // Check for error conditions.
    if !isManualAck {
        return ErrManualAck
    }
    if sc == nil {
        return ErrBadSubscription
    }

@@ -453,6 +365,9 @@ func (msg *Msg) Ack() error {
    if nc == nil {
        return ErrBadConnection
    }
    if !isManualAck {
        return ErrManualAck
    }

    // Ack here.
    ack := &pb.Ack{Subject: msg.Subject, Sequence: msg.Sequence}
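The Ack() hunks above reorder the error checks so that a nil subscription or connection is rejected first and ErrManualAck is returned only when the subscription was not created in manual-ack mode. A hedged sketch of that flow from the subscriber side follows; the subject, wait duration and process helper are illustrative, and SetManualAckMode/AckWait are subscription options this package is assumed to expose (they do not appear in the hunks shown here).

```go
package example

import (
    "log"
    "time"

    stan "github.com/nats-io/go-nats-streaming"
)

// subscribeManualAck acknowledges a message only after it has been processed.
// Calling Ack() on a subscription without manual-ack mode trips the
// ErrManualAck check in the hunk above.
func subscribeManualAck(sc stan.Conn) (stan.Subscription, error) {
    return sc.Subscribe("foo", func(m *stan.Msg) {
        if err := process(m.Data); err != nil {
            // Leave the message unacknowledged; the server redelivers it
            // after the AckWait interval.
            log.Printf("processing failed, seq %d left unacked: %v", m.Sequence, err)
            return
        }
        if err := m.Ack(); err != nil {
            log.Printf("ack failed: %v", err)
        }
    }, stan.SetManualAckMode(), stan.AckWait(30*time.Second))
}

// process is placeholder business logic for the sketch.
func process(data []byte) error { return nil }
```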