Vendor new queue-worker version

Introduces version 0.4.6 of queue-worker - see the upstream repo for changes.

Signed-off-by: Alex Ellis (VMware) <alexellis2@gmail.com>
Author: Alex Ellis (VMware)
Date:   2018-06-18 20:09:33 +01:00
parent 41bda568a7
commit 223c561706
81 changed files with 4701 additions and 1323 deletions


@@ -1,7 +1,22 @@
// Copyright 2012-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test

import (
	"fmt"
	"math"
	"net"
	"regexp"
	"runtime"
	"strings"
@@ -9,6 +24,7 @@ import (
	"testing"
	"time"
	"github.com/nats-io/gnatsd/server"
	"github.com/nats-io/gnatsd/test"
	"github.com/nats-io/go-nats"
)
@@ -25,6 +41,17 @@ var testServers = []string{
var servers = strings.Join(testServers, ",")

func serverVersionAtLeast(major, minor, update int) error {
	var (
		ma, mi, up int
	)
	fmt.Sscanf(server.VERSION, "%d.%d.%d", &ma, &mi, &up)
	if ma > major || (ma == major && mi > minor) || (ma == major && mi == minor && up >= update) {
		return nil
	}
	return fmt.Errorf("Server version is %v, requires %d.%d.%d+", server.VERSION, major, minor, update)
}

func TestServersOption(t *testing.T) {
	opts := nats.GetDefaultOptions()
	opts.NoRandomize = true
@@ -604,3 +631,230 @@ func TestPingReconnect(t *testing.T) {
		}
	}
}

type checkPoolUpdatedDialer struct {
	conn         net.Conn
	first, final bool
	ra           int
}

func (d *checkPoolUpdatedDialer) Dial(network, address string) (net.Conn, error) {
	doReal := false
	if d.first {
		d.first = false
		doReal = true
	} else if d.final {
		d.ra++
		return nil, fmt.Errorf("On purpose")
	} else {
		d.ra++
		if d.ra == 15 {
			d.ra = 0
			doReal = true
		}
	}
	if doReal {
		c, err := net.Dial(network, address)
		if err != nil {
			return nil, err
		}
		d.conn = c
		return c, nil
	}
	return nil, fmt.Errorf("On purpose")
}
func TestServerPoolUpdatedWhenRouteGoesAway(t *testing.T) {
	if err := serverVersionAtLeast(1, 0, 7); err != nil {
		t.Skipf(err.Error())
	}
	s1Opts := test.DefaultTestOptions
	s1Opts.Host = "127.0.0.1"
	s1Opts.Port = 4222
	s1Opts.Cluster.Host = "127.0.0.1"
	s1Opts.Cluster.Port = 6222
	s1Opts.Routes = server.RoutesFromStr("nats://127.0.0.1:6223,nats://127.0.0.1:6224")
	s1 := test.RunServer(&s1Opts)
	defer s1.Shutdown()

	s1Url := "nats://127.0.0.1:4222"
	s2Url := "nats://127.0.0.1:4223"
	s3Url := "nats://127.0.0.1:4224"

	ch := make(chan bool, 1)
	chch := make(chan bool, 1)
	connHandler := func(_ *nats.Conn) {
		chch <- true
	}
	nc, err := nats.Connect(s1Url,
		nats.ReconnectHandler(connHandler),
		nats.DiscoveredServersHandler(func(_ *nats.Conn) {
			ch <- true
		}))
	if err != nil {
		t.Fatalf("Error on connect")
	}

	s2Opts := test.DefaultTestOptions
	s2Opts.Host = "127.0.0.1"
	s2Opts.Port = s1Opts.Port + 1
	s2Opts.Cluster.Host = "127.0.0.1"
	s2Opts.Cluster.Port = 6223
	s2Opts.Routes = server.RoutesFromStr("nats://127.0.0.1:6222,nats://127.0.0.1:6224")
	s2 := test.RunServer(&s2Opts)
	defer s2.Shutdown()

	// Wait to be notified
	if err := Wait(ch); err != nil {
		t.Fatal("New server callback was not invoked")
	}

	checkPool := func(expected []string) {
		// Don't use discovered here, but Servers to have the full list.
		// Also, there may be cases where the mesh is not formed yet,
		// so try again on failure.
		var (
			ds      []string
			timeout = time.Now().Add(5 * time.Second)
		)
		for time.Now().Before(timeout) {
			ds = nc.Servers()
			if len(ds) == len(expected) {
				m := make(map[string]struct{}, len(ds))
				for _, url := range ds {
					m[url] = struct{}{}
				}
				ok := true
				for _, url := range expected {
					if _, present := m[url]; !present {
						ok = false
						break
					}
				}
				if ok {
					return
				}
			}
			time.Sleep(50 * time.Millisecond)
		}
		stackFatalf(t, "Expected %v, got %v", expected, ds)
	}
	// Verify that we now know about s2
	checkPool([]string{s1Url, s2Url})

	s3Opts := test.DefaultTestOptions
	s3Opts.Host = "127.0.0.1"
	s3Opts.Port = s2Opts.Port + 1
	s3Opts.Cluster.Host = "127.0.0.1"
	s3Opts.Cluster.Port = 6224
	s3Opts.Routes = server.RoutesFromStr("nats://127.0.0.1:6222,nats://127.0.0.1:6223")
	s3 := test.RunServer(&s3Opts)
	defer s3.Shutdown()

	// Wait to be notified
	if err := Wait(ch); err != nil {
		t.Fatal("New server callback was not invoked")
	}
	// Verify that we now know about s3
	checkPool([]string{s1Url, s2Url, s3Url})

	// Stop s1. Since this was passed to the Connect() call, this one should
	// still be present.
	s1.Shutdown()
	// Wait for reconnect
	if err := Wait(chch); err != nil {
		t.Fatal("Reconnect handler not invoked")
	}
	checkPool([]string{s1Url, s2Url, s3Url})

	// Check the server we reconnected to.
	reConnectedTo := nc.ConnectedUrl()
	expected := []string{s1Url}
	restartS2 := false
	if reConnectedTo == s2Url {
		restartS2 = true
		s2.Shutdown()
		expected = append(expected, s3Url)
	} else if reConnectedTo == s3Url {
		s3.Shutdown()
		expected = append(expected, s2Url)
	} else {
		t.Fatalf("Unexpected server client has reconnected to: %v", reConnectedTo)
	}

	// Wait for reconnect
	if err := Wait(chch); err != nil {
		t.Fatal("Reconnect handler not invoked")
	}
	// The implicit server that we just shutdown should have been removed from the pool
	checkPool(expected)

	// Restart the one that was shutdown and check that it is now back in the pool
	if restartS2 {
		s2 = test.RunServer(&s2Opts)
		defer s2.Shutdown()
		expected = append(expected, s2Url)
	} else {
		s3 = test.RunServer(&s3Opts)
		defer s3.Shutdown()
		expected = append(expected, s3Url)
	}
	// Since this is not a "new" server, the DiscoveredServersCB won't be invoked.
	checkPool(expected)

	nc.Close()

	// Restart s1
	s1 = test.RunServer(&s1Opts)
	defer s1.Shutdown()

	// We should have all 3 servers running now...

	// Create a client connection with special dialer.
	d := &checkPoolUpdatedDialer{first: true}
	nc, err = nats.Connect(s1Url,
		nats.MaxReconnects(10),
		nats.ReconnectWait(15*time.Millisecond),
		nats.SetCustomDialer(d),
		nats.ReconnectHandler(connHandler),
		nats.ClosedHandler(connHandler))
	if err != nil {
		t.Fatalf("Error on connect")
	}
	defer nc.Close()

	// Make sure that we have all 3 servers in the pool (this will wait if required)
	checkPool(expected)

	// Cause disconnection between client and server. We are going to reconnect
	// and we want to check that when we get the INFO again with the list of
	// servers, we don't lose the knowledge of how many times we tried to
	// reconnect.
	d.conn.Close()
	// Wait for client to reconnect to a server
	if err := Wait(chch); err != nil {
		t.Fatal("Reconnect handler not invoked")
	}

	// At this point, we should have tried to reconnect 5 times to each server.
	// For the one we reconnected to, its max reconnect attempts should have been
	// cleared, not for the other ones.

	// Cause a disconnect again and ensure we won't reconnect.
	d.final = true
	d.conn.Close()
	// Wait for Close callback to be invoked.
	if err := Wait(chch); err != nil {
		t.Fatal("Close handler not invoked")
	}
	// Since MaxReconnect is 10, after trying 5 more times on 2 of the servers,
	// these should have been removed. We have still 5 more tries for the server
	// we did previously reconnect to.
	// So total of reconnect attempt should be: 2*5+1*10=20
	if d.ra != 20 {
		t.Fatalf("Should have tried to reconnect 20 more times, got %v", d.ra)
	}
	nc.Close()
}
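
For reference, here is a minimal sketch of how a client might use the custom-dialer and reconnect options that the new test above exercises. This is not part of the commit: the loggingDialer type and the server URL are illustrative placeholders, and only options visible in the diff (nats.SetCustomDialer, nats.MaxReconnects, nats.ReconnectWait) are assumed.

package main

import (
	"log"
	"net"
	"time"

	"github.com/nats-io/go-nats"
)

// loggingDialer is a hypothetical CustomDialer that logs every dial
// attempt before delegating to net.Dial.
type loggingDialer struct{}

func (d *loggingDialer) Dial(network, address string) (net.Conn, error) {
	log.Printf("dialing %s %s", network, address)
	return net.Dial(network, address)
}

func main() {
	// Connect with a bounded number of reconnect attempts and a short
	// wait between attempts, mirroring the options used in the test.
	nc, err := nats.Connect("nats://127.0.0.1:4222",
		nats.SetCustomDialer(&loggingDialer{}),
		nats.MaxReconnects(10),
		nats.ReconnectWait(15*time.Millisecond))
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// Servers() reports the full known pool, including servers discovered
	// from cluster INFO updates.
	log.Println("known servers:", nc.Servers())
}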