Merge master into breakout_swarm

Signed-off-by: Alex Ellis <alexellis2@gmail.com>
This commit is contained in:
Alex Ellis
2018-02-01 09:25:39 +00:00
parent afeb7bbce4
commit f954bf0733
1953 changed files with 614131 additions and 175582 deletions


@@ -0,0 +1,354 @@
// Copyright 2016 Apcera Inc. All rights reserved.
package bench
import (
"bytes"
"encoding/csv"
"fmt"
"log"
"math"
"strconv"
"time"
"github.com/nats-io/go-nats"
"github.com/nats-io/nuid"
)
// A Sample for a particular client
type Sample struct {
JobMsgCnt int
MsgCnt uint64
MsgBytes uint64
IOBytes uint64
Start time.Time
End time.Time
}
// SampleGroup for a number of samples; the group is itself a Sample aggregating the values of its Samples
type SampleGroup struct {
Sample
Samples []*Sample
}
// Benchmark to hold the various Samples organized by publishers and subscribers
type Benchmark struct {
Sample
Name string
RunID string
Pubs *SampleGroup
Subs *SampleGroup
subChannel chan *Sample
pubChannel chan *Sample
}
// NewBenchmark initializes a Benchmark. After creating a bench call AddSubSample/AddPubSample.
// When done collecting samples, call Close
func NewBenchmark(name string, subCnt, pubCnt int) *Benchmark {
bm := Benchmark{Name: name, RunID: nuid.Next()}
bm.Subs = NewSampleGroup()
bm.Pubs = NewSampleGroup()
bm.subChannel = make(chan *Sample, subCnt)
bm.pubChannel = make(chan *Sample, pubCnt)
return &bm
}
// Close organizes collected Samples and calculates aggregates. After Close(), no more samples can be added.
func (bm *Benchmark) Close() {
close(bm.subChannel)
close(bm.pubChannel)
for s := range bm.subChannel {
bm.Subs.AddSample(s)
}
for s := range bm.pubChannel {
bm.Pubs.AddSample(s)
}
if bm.Subs.HasSamples() {
bm.Start = bm.Subs.Start
bm.End = bm.Subs.End
} else {
bm.Start = bm.Pubs.Start
bm.End = bm.Pubs.End
}
if bm.Subs.HasSamples() && bm.Pubs.HasSamples() {
if bm.Start.After(bm.Subs.Start) {
bm.Start = bm.Subs.Start
}
if bm.Start.After(bm.Pubs.Start) {
bm.Start = bm.Pubs.Start
}
if bm.End.Before(bm.Subs.End) {
bm.End = bm.Subs.End
}
if bm.End.Before(bm.Pubs.End) {
bm.End = bm.Pubs.End
}
}
bm.MsgBytes = bm.Pubs.MsgBytes + bm.Subs.MsgBytes
bm.IOBytes = bm.Pubs.IOBytes + bm.Subs.IOBytes
bm.MsgCnt = bm.Pubs.MsgCnt + bm.Subs.MsgCnt
bm.JobMsgCnt = bm.Pubs.JobMsgCnt + bm.Subs.JobMsgCnt
}
// AddSubSample to the benchmark
func (bm *Benchmark) AddSubSample(s *Sample) {
bm.subChannel <- s
}
// AddPubSample to the benchmark
func (bm *Benchmark) AddPubSample(s *Sample) {
bm.pubChannel <- s
}
// CSV generates a CSV report of all the samples collected
func (bm *Benchmark) CSV() string {
var buffer bytes.Buffer
writer := csv.NewWriter(&buffer)
headers := []string{"#RunID", "ClientID", "MsgCount", "MsgBytes", "MsgsPerSec", "BytesPerSec", "DurationSecs"}
if err := writer.Write(headers); err != nil {
log.Fatalf("Error while serializing headers %q: %v", headers, err)
}
groups := []*SampleGroup{bm.Subs, bm.Pubs}
pre := "S"
for i, g := range groups {
if i == 1 {
pre = "P"
}
for j, c := range g.Samples {
r := []string{bm.RunID, fmt.Sprintf("%s%d", pre, j), fmt.Sprintf("%d", c.MsgCnt), fmt.Sprintf("%d", c.MsgBytes), fmt.Sprintf("%d", c.Rate()), fmt.Sprintf("%f", c.Throughput()), fmt.Sprintf("%f", c.Duration().Seconds())}
if err := writer.Write(r); err != nil {
log.Fatalf("Error while serializing %v: %v", c, err)
}
}
}
writer.Flush()
return buffer.String()
}
// NewSample creates a new Sample initialized to the provided values. Message and byte counts are captured from the nats.Conn
func NewSample(jobCount int, msgSize int, start, end time.Time, nc *nats.Conn) *Sample {
s := Sample{JobMsgCnt: jobCount, Start: start, End: end}
s.MsgBytes = uint64(msgSize * jobCount)
s.MsgCnt = nc.OutMsgs + nc.InMsgs
s.IOBytes = nc.OutBytes + nc.InBytes
return &s
}
// Throughput in bytes per second
func (s *Sample) Throughput() float64 {
return float64(s.MsgBytes) / s.Duration().Seconds()
}
// Rate of messages in the job per second
func (s *Sample) Rate() int64 {
return int64(float64(s.JobMsgCnt) / s.Duration().Seconds())
}
func (s *Sample) String() string {
rate := commaFormat(s.Rate())
throughput := HumanBytes(s.Throughput(), false)
return fmt.Sprintf("%s msgs/sec ~ %s/sec", rate, throughput)
}
// Duration that the sample was active
func (s *Sample) Duration() time.Duration {
return s.End.Sub(s.Start)
}
// Seconds that the sample or samples were active
func (s *Sample) Seconds() float64 {
return s.Duration().Seconds()
}
// NewSampleGroup initializer
func NewSampleGroup() *SampleGroup {
s := new(SampleGroup)
s.Samples = make([]*Sample, 0)
return s
}
// Statistics information of the sample group (min, average, max and standard deviation)
func (sg *SampleGroup) Statistics() string {
return fmt.Sprintf("min %s | avg %s | max %s | stddev %s msgs", commaFormat(sg.MinRate()), commaFormat(sg.AvgRate()), commaFormat(sg.MaxRate()), commaFormat(int64(sg.StdDev())))
}
// MinRate returns the smallest message rate in the SampleGroup
func (sg *SampleGroup) MinRate() int64 {
m := int64(0)
for i, s := range sg.Samples {
if i == 0 {
m = s.Rate()
}
m = min(m, s.Rate())
}
return m
}
// MaxRate returns the largest message rate in the SampleGroup
func (sg *SampleGroup) MaxRate() int64 {
m := int64(0)
for i, s := range sg.Samples {
if i == 0 {
m = s.Rate()
}
m = max(m, s.Rate())
}
return m
}
// AvgRate returns the average of all the message rates in the SampleGroup
func (sg *SampleGroup) AvgRate() int64 {
sum := uint64(0)
for _, s := range sg.Samples {
sum += uint64(s.Rate())
}
return int64(sum / uint64(len(sg.Samples)))
}
// StdDev returns the standard deviation of the message rates in the SampleGroup
func (sg *SampleGroup) StdDev() float64 {
avg := float64(sg.AvgRate())
sum := float64(0)
for _, c := range sg.Samples {
sum += math.Pow(float64(c.Rate())-avg, 2)
}
variance := sum / float64(len(sg.Samples))
return math.Sqrt(variance)
}
// AddSample adds a Sample to the SampleGroup. After adding a Sample it shouldn't be modified.
func (sg *SampleGroup) AddSample(e *Sample) {
sg.Samples = append(sg.Samples, e)
if len(sg.Samples) == 1 {
sg.Start = e.Start
sg.End = e.End
}
sg.IOBytes += e.IOBytes
sg.JobMsgCnt += e.JobMsgCnt
sg.MsgCnt += e.MsgCnt
sg.MsgBytes += e.MsgBytes
if e.Start.Before(sg.Start) {
sg.Start = e.Start
}
if e.End.After(sg.End) {
sg.End = e.End
}
}
// HasSamples returns true if the group has samples
func (sg *SampleGroup) HasSamples() bool {
return len(sg.Samples) > 0
}
// Report returns a human readable report of the samples taken in the Benchmark
func (bm *Benchmark) Report() string {
var buffer bytes.Buffer
indent := ""
if !bm.Pubs.HasSamples() && !bm.Subs.HasSamples() {
return "No publisher or subscribers. Nothing to report."
}
if bm.Pubs.HasSamples() && bm.Subs.HasSamples() {
buffer.WriteString(fmt.Sprintf("%s Pub/Sub stats: %s\n", bm.Name, bm))
indent += " "
}
if bm.Pubs.HasSamples() {
buffer.WriteString(fmt.Sprintf("%sPub stats: %s\n", indent, bm.Pubs))
if len(bm.Pubs.Samples) > 1 {
for i, stat := range bm.Pubs.Samples {
buffer.WriteString(fmt.Sprintf("%s [%d] %v (%d msgs)\n", indent, i+1, stat, stat.JobMsgCnt))
}
buffer.WriteString(fmt.Sprintf("%s %s\n", indent, bm.Pubs.Statistics()))
}
}
if bm.Subs.HasSamples() {
buffer.WriteString(fmt.Sprintf("%sSub stats: %s\n", indent, bm.Subs))
if len(bm.Subs.Samples) > 1 {
for i, stat := range bm.Subs.Samples {
buffer.WriteString(fmt.Sprintf("%s [%d] %v (%d msgs)\n", indent, i+1, stat, stat.JobMsgCnt))
}
buffer.WriteString(fmt.Sprintf("%s %s\n", indent, bm.Subs.Statistics()))
}
}
return buffer.String()
}
func commaFormat(n int64) string {
in := strconv.FormatInt(n, 10)
out := make([]byte, len(in)+(len(in)-2+int(in[0]/'0'))/3)
if in[0] == '-' {
in, out[0] = in[1:], '-'
}
for i, j, k := len(in)-1, len(out)-1, 0; ; i, j = i-1, j-1 {
out[j] = in[i]
if i == 0 {
return string(out)
}
if k++; k == 3 {
j, k = j-1, 0
out[j] = ','
}
}
}
// HumanBytes formats bytes as a human readable string
func HumanBytes(bytes float64, si bool) string {
var base = 1024
pre := []string{"K", "M", "G", "T", "P", "E"}
var post = "B"
if si {
base = 1000
pre = []string{"k", "M", "G", "T", "P", "E"}
post = "iB"
}
if bytes < float64(base) {
return fmt.Sprintf("%.2f B", bytes)
}
exp := int(math.Log(bytes) / math.Log(float64(base)))
index := exp - 1
units := pre[index] + post
return fmt.Sprintf("%.2f %s", bytes/math.Pow(float64(base), float64(exp)), units)
}
func min(x, y int64) int64 {
if x < y {
return x
}
return y
}
func max(x, y int64) int64 {
if x > y {
return x
}
return y
}
// MsgsPerClient divides the number of messages by the number of clients and tries to distribute them as evenly as possible
func MsgsPerClient(numMsgs, numClients int) []int {
var counts []int
if numClients == 0 || numMsgs == 0 {
return counts
}
counts = make([]int, numClients)
mc := numMsgs / numClients
for i := 0; i < numClients; i++ {
counts[i] = mc
}
extra := numMsgs % numClients
for i := 0; i < extra; i++ {
counts[i]++
}
return counts
}
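
Usage note (illustrative, not part of this diff): the intended flow for the bench package above is to create a Benchmark with NewBenchmark, build one Sample per publisher or subscriber with NewSample, feed the samples in through AddPubSample/AddSubSample, then call Close before reading Report or CSV. A minimal single-publisher sketch follows; the import path, NATS URL, subject name, message count and size are assumptions and should be adapted to how the package is vendored in this tree.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/nats-io/go-nats"
	"github.com/nats-io/go-nats/bench" // assumed import path; adjust to the vendored location
)

func main() {
	// Connect to a locally running NATS server (assumption for this sketch).
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	const numMsgs, msgSize = 100000, 128
	bm := bench.NewBenchmark("demo", 0, 1) // 0 subscribers, 1 publisher

	msg := make([]byte, msgSize)
	start := time.Now()
	for i := 0; i < numMsgs; i++ {
		// Publish errors are ignored for brevity in this sketch.
		nc.Publish("bench.demo", msg)
	}
	nc.Flush()
	bm.AddPubSample(bench.NewSample(numMsgs, msgSize, start, time.Now(), nc))

	// Close aggregates the collected samples; Report renders them.
	bm.Close()
	fmt.Println(bm.Report())
}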


@@ -0,0 +1,226 @@
package bench
import (
"fmt"
"strings"
"testing"
"time"
"github.com/nats-io/go-nats"
)
const (
MsgSize = 8
Million = 1000 * 1000
)
var baseTime = time.Now()
func millionMessagesSecondSample(seconds int) *Sample {
messages := Million * seconds
start := baseTime
end := start.Add(time.Second * time.Duration(seconds))
nc := new(nats.Conn)
s := NewSample(messages, MsgSize, start, end, nc)
s.MsgCnt = uint64(messages)
s.MsgBytes = uint64(messages * MsgSize)
s.IOBytes = s.MsgBytes
return s
}
func TestDuration(t *testing.T) {
s := millionMessagesSecondSample(1)
duration := s.End.Sub(s.Start)
if duration != s.Duration() || duration != time.Second {
t.Fatal("Expected sample duration to be 1 second")
}
}
func TestSeconds(t *testing.T) {
s := millionMessagesSecondSample(1)
seconds := s.End.Sub(s.Start).Seconds()
if seconds != s.Seconds() || seconds != 1.0 {
t.Fatal("Expected sample seconds to be 1 second")
}
}
func TestRate(t *testing.T) {
s := millionMessagesSecondSample(60)
if s.Rate() != Million {
t.Fatal("Expected rate at 1 million msgs")
}
}
func TestThroughput(t *testing.T) {
s := millionMessagesSecondSample(60)
if s.Throughput() != Million*MsgSize {
t.Fatalf("Expected throughput at %d million bytes/sec", MsgSize)
}
}
func TestStrings(t *testing.T) {
s := millionMessagesSecondSample(60)
if len(s.String()) == 0 {
t.Fatal("Sample didn't provide a String")
}
}
func TestGroupDuration(t *testing.T) {
sg := NewSampleGroup()
sg.AddSample(millionMessagesSecondSample(1))
sg.AddSample(millionMessagesSecondSample(2))
duration := sg.End.Sub(sg.Start)
if duration != sg.Duration() || duration != time.Duration(2)*time.Second {
t.Fatal("Expected aggregate duration to be 2.0 seconds")
}
}
func TestGroupSeconds(t *testing.T) {
sg := NewSampleGroup()
sg.AddSample(millionMessagesSecondSample(1))
sg.AddSample(millionMessagesSecondSample(2))
sg.AddSample(millionMessagesSecondSample(3))
seconds := sg.End.Sub(sg.Start).Seconds()
if seconds != sg.Seconds() || seconds != 3.0 {
t.Fatal("Expected aggregate seconds to be 3.0 seconds")
}
}
func TestGroupRate(t *testing.T) {
sg := NewSampleGroup()
sg.AddSample(millionMessagesSecondSample(1))
sg.AddSample(millionMessagesSecondSample(2))
sg.AddSample(millionMessagesSecondSample(3))
if sg.Rate() != Million*2 {
t.Fatal("Expected MsgRate at 2 million msg/sec")
}
}
func TestGroupThroughput(t *testing.T) {
sg := NewSampleGroup()
sg.AddSample(millionMessagesSecondSample(1))
sg.AddSample(millionMessagesSecondSample(2))
sg.AddSample(millionMessagesSecondSample(3))
if sg.Throughput() != 2*Million*MsgSize {
t.Fatalf("Expected througput at %d million bytes/sec", 2*MsgSize)
}
}
func TestMinMaxRate(t *testing.T) {
sg := NewSampleGroup()
sg.AddSample(millionMessagesSecondSample(1))
sg.AddSample(millionMessagesSecondSample(2))
sg.AddSample(millionMessagesSecondSample(3))
if sg.MinRate() != sg.MaxRate() {
t.Fatal("Expected MinRate == MaxRate")
}
}
func TestAvgRate(t *testing.T) {
sg := NewSampleGroup()
sg.AddSample(millionMessagesSecondSample(1))
sg.AddSample(millionMessagesSecondSample(2))
sg.AddSample(millionMessagesSecondSample(3))
if sg.MinRate() != sg.AvgRate() {
t.Fatal("Expected MinRate == AvgRate")
}
}
func TestStdDev(t *testing.T) {
sg := NewSampleGroup()
sg.AddSample(millionMessagesSecondSample(1))
sg.AddSample(millionMessagesSecondSample(2))
sg.AddSample(millionMessagesSecondSample(3))
if sg.StdDev() != 0.0 {
t.Fatal("Expected stddev to be zero")
}
}
func TestBenchSetup(t *testing.T) {
bench := NewBenchmark("test", 1, 1)
bench.AddSubSample(millionMessagesSecondSample(1))
bench.AddPubSample(millionMessagesSecondSample(1))
bench.Close()
if len(bench.RunID) == 0 {
t.Fatal("Bench doesn't have a RunID")
}
if len(bench.Pubs.Samples) != 1 {
t.Fatal("Expected one publisher")
}
if len(bench.Subs.Samples) != 1 {
t.Fatal("Expected one subscriber")
}
if bench.MsgCnt != 2*Million {
t.Fatal("Expected 2 million msgs")
}
if bench.IOBytes != 2*Million*MsgSize {
t.Fatalf("Expected %d million bytes", 2*MsgSize)
}
if bench.Duration() != time.Second {
t.Fatal("Expected duration to be 1 second")
}
}
func makeBench(subs, pubs int) *Benchmark {
bench := NewBenchmark("test", subs, pubs)
for i := 0; i < subs; i++ {
bench.AddSubSample(millionMessagesSecondSample(1))
}
for i := 0; i < pubs; i++ {
bench.AddPubSample(millionMessagesSecondSample(1))
}
bench.Close()
return bench
}
func TestCsv(t *testing.T) {
bench := makeBench(1, 1)
csv := bench.CSV()
lines := strings.Split(csv, "\n")
if len(lines) != 4 {
t.Fatal("Expected 4 lines of output from the CSV string")
}
fields := strings.Split(lines[1], ",")
if len(fields) != 7 {
t.Fatal("Expected 7 fields")
}
}
func TestBenchStrings(t *testing.T) {
bench := makeBench(1, 1)
s := bench.Report()
lines := strings.Split(s, "\n")
if len(lines) != 4 {
t.Fatal("Expected 3 lines of output: header, pub, sub, empty")
}
bench = makeBench(2, 2)
s = bench.Report()
lines = strings.Split(s, "\n")
if len(lines) != 10 {
fmt.Printf("%q\n", s)
t.Fatal("Expected 11 lines of output: header, pub header, pub x 2, stats, sub headers, sub x 2, stats, empty")
}
}
func TestMsgsPerClient(t *testing.T) {
zero := MsgsPerClient(0, 0)
if len(zero) != 0 {
t.Fatal("Expected 0 length for 0 clients")
}
onetwo := MsgsPerClient(1, 2)
if len(onetwo) != 2 || onetwo[0] != 1 || onetwo[1] != 0 {
t.Fatal("Expected uneven distribution")
}
twotwo := MsgsPerClient(2, 2)
if len(twotwo) != 2 || twotwo[0] != 1 || twotwo[1] != 1 {
t.Fatal("Expected even distribution")
}
threetwo := MsgsPerClient(3, 2)
if len(threetwo) != 2 || threetwo[0] != 2 || threetwo[1] != 1 {
t.Fatal("Expected uneven distribution")
}
}
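
As a closing illustration (not part of this diff), the even/uneven split asserted by TestMsgsPerClient can also be expressed as a runnable Go example function in the same test package; fmt is already imported there, and the expected output assumes the MsgsPerClient implementation above.

// ExampleMsgsPerClient shows that leftover messages are handed to the
// first clients, so no two counts differ by more than one.
func ExampleMsgsPerClient() {
	fmt.Println(MsgsPerClient(10, 3))
	// Output: [4 3 3]
}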