rc1
This commit is contained in:
135
examples/database/main.go
Normal file
135
examples/database/main.go
Normal file
@@ -0,0 +1,135 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"gopkg.in/ini.v1"
|
||||
"gordenko.dev/dima/diploma/database"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var (
|
||||
logfile = os.Stdout
|
||||
iniFileName string
|
||||
)
|
||||
|
||||
flag.Usage = func() {
|
||||
fmt.Fprint(flag.CommandLine.Output(), helpMessage)
|
||||
fmt.Fprint(flag.CommandLine.Output(), configExample)
|
||||
fmt.Fprintf(flag.CommandLine.Output(), mainUsage, os.Args[0])
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
flag.StringVar(&iniFileName, "c", "database.ini", "path to *.ini config file")
|
||||
flag.Parse()
|
||||
|
||||
config, err := loadConfig(iniFileName)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
var (
|
||||
exitCh = make(chan struct{})
|
||||
wg = new(sync.WaitGroup)
|
||||
)
|
||||
|
||||
db, err := database.New(database.Options{
|
||||
TCPPort: config.TcpPort,
|
||||
Dir: config.Dir,
|
||||
DatabaseName: config.DatabaseName,
|
||||
RedoDir: config.REDODir,
|
||||
Logfile: logfile,
|
||||
ExitCh: exitCh,
|
||||
WaitGroup: wg,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("database.New: %s\n", err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
err = db.ListenAndServe()
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Add(1)
|
||||
|
||||
fmt.Fprintf(logfile, "database %q started on port %d.\n",
|
||||
config.DatabaseName, config.TcpPort)
|
||||
fmt.Fprintln(logfile, config)
|
||||
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
<-sigs
|
||||
|
||||
close(exitCh)
|
||||
wg.Wait()
|
||||
|
||||
fmt.Fprintln(logfile, "database stopped.")
|
||||
}
|
||||
|
||||
// config file

// mainUsage is the tail of the flag.Usage output; %s receives os.Args[0].
const mainUsage = `Usage:
  %s -c path/to/config.ini

`

// helpMessage is the banner printed first by flag.Usage.
const helpMessage = `Diploma project. Database. Version: 1.0
created by Dmytro Gordenko, 1.e4.kc6@gmail.com
`

// configExample shows a sample database.ini in the flag.Usage output.
const configExample = `
database.ini example:

tcpPort = 12345
dir = ../../datadir
redoDir = ../../datadir
databaseName = test

`

// Config holds the startup options parsed from the INI file by loadConfig.
type Config struct {
	TcpPort      int    // TCP port the database listens on (required key)
	Dir          string // data directory
	REDODir      string // directory for the redo log
	DatabaseName string
}
|
||||
|
||||
func (s Config) String() string {
|
||||
return fmt.Sprintf(`starting options:
|
||||
tcpPort = %d
|
||||
dir = %s
|
||||
redoDir = %s
|
||||
databaseName = %s
|
||||
`,
|
||||
s.TcpPort, s.Dir, s.REDODir, s.DatabaseName)
|
||||
}
|
||||
|
||||
func loadConfig(iniFileName string) (_ Config, err error) {
|
||||
file, err := ini.Load(iniFileName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
conf := Config{}
|
||||
|
||||
top := file.Section("")
|
||||
|
||||
conf.TcpPort, err = top.Key("tcpPort").Int()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("'tcpPort' option is required in config file")
|
||||
return
|
||||
}
|
||||
conf.Dir = top.Key("dir").String()
|
||||
conf.REDODir = top.Key("redoDir").String()
|
||||
conf.DatabaseName = top.Key("databaseName").String()
|
||||
return conf, nil
|
||||
}
|
||||
377
examples/loadtest/loadtest.go
Normal file
377
examples/loadtest/loadtest.go
Normal file
@@ -0,0 +1,377 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"gordenko.dev/dima/diploma"
|
||||
"gordenko.dev/dima/diploma/client"
|
||||
"gordenko.dev/dima/diploma/proto"
|
||||
)
|
||||
|
||||
// METRICS INFO

// MetricInfo describes one metric from the metrics-info JSON file: its
// identity, kind, decimal precision, the time span its stored measures
// cover, and how many measures it has.
type MetricInfo struct {
	MetricID   uint32             `json:"metricID"`
	MetricType diploma.MetricType `json:"metricType"`
	FracDigits int                `json:"fracDigits"`
	Since      int64              `json:"since"` // unix seconds; presumably the earliest measure — confirm
	Until      int64              `json:"until"` // unix seconds; presumably the latest measure — confirm
	Qty        int                `json:"qty"`
}
|
||||
|
||||
func readMetricInfo(fileName string) (list []MetricInfo, err error) {
|
||||
buf, err := os.ReadFile(fileName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = json.Unmarshal(buf, &list)
|
||||
return
|
||||
}
|
||||
|
||||
// RANDOM QUERY GENERATOR

// QueryRecipe describes one randomly generated request: which method to
// call, for which metric(s), and over which time range.
type QueryRecipe struct {
	MetricID  uint32          // target metric (range queries)
	MetricIDs []uint32        // target metrics (listCurrentValues only)
	Method    int             // one of the method codes declared in main.go
	RangeCode int             // one of the time-range codes below
	Since     uint32          // unix seconds, range start
	Until     uint32          // unix seconds, range end
	GroupBy   diploma.GroupBy // aggregation period (period queries only)
}

// RandomQueryGenerator produces QueryRecipe values according to the
// configured probabilities.
type RandomQueryGenerator struct {
	metrics                      []MetricInfo
	groupByOptions               []diploma.GroupBy
	listCurrentValuesProbability int   // percent: listCurrentValues vs. everything else
	listPeriodsProbability       int   // percent: period queries vs. raw measures
	timeRangeProbDistribution    []int // cumulative distribution over range codes
}

// RandomQueryGeneratorOptions configures NewRandomQueryGenerator.
// Probabilities are percentages; TimeRangeProbabilities must sum to 100.
type RandomQueryGeneratorOptions struct {
	Metrics                      []MetricInfo
	ListCurrentValuesProbability int
	ListPeriodsProbability       int
	TimeRangeProbabilities       []int
}
|
||||
|
||||
func NewRandomQueryGenerator(opt RandomQueryGeneratorOptions) *RandomQueryGenerator {
|
||||
if opt.ListCurrentValuesProbability >= 100 {
|
||||
panic(fmt.Sprintf("wrong ListCurrentValuesProbability: %d", opt.ListCurrentValuesProbability))
|
||||
}
|
||||
|
||||
if opt.ListPeriodsProbability >= 100 {
|
||||
panic(fmt.Sprintf("wrong ListPeriodsProbability: %d", opt.ListPeriodsProbability))
|
||||
}
|
||||
// check total time range propability
|
||||
var totalTimeRangeProbability int
|
||||
for _, p := range opt.TimeRangeProbabilities {
|
||||
totalTimeRangeProbability += p
|
||||
}
|
||||
if totalTimeRangeProbability != 100 {
|
||||
panic(fmt.Sprintf("total time range probabilities != 100: %d", totalTimeRangeProbability))
|
||||
}
|
||||
|
||||
// create time range probability distribution
|
||||
timeRangeProbDistribution := make([]int, len(opt.TimeRangeProbabilities))
|
||||
timeRangeProbDistribution[0] = opt.TimeRangeProbabilities[0]
|
||||
for i := 1; i < len(opt.TimeRangeProbabilities); i++ {
|
||||
timeRangeProbDistribution[i] = timeRangeProbDistribution[i-1] + opt.TimeRangeProbabilities[i]
|
||||
}
|
||||
|
||||
return &RandomQueryGenerator{
|
||||
metrics: opt.Metrics,
|
||||
groupByOptions: []diploma.GroupBy{
|
||||
diploma.GroupByHour,
|
||||
diploma.GroupByDay,
|
||||
diploma.GroupByMonth,
|
||||
},
|
||||
listCurrentValuesProbability: opt.ListCurrentValuesProbability,
|
||||
listPeriodsProbability: opt.ListPeriodsProbability,
|
||||
timeRangeProbDistribution: timeRangeProbDistribution,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *RandomQueryGenerator) GetQueryRecipe() QueryRecipe {
|
||||
metric := s.getRandomMetric()
|
||||
|
||||
num := rand.Intn(100)
|
||||
if num < s.listCurrentValuesProbability {
|
||||
qty := 5 + rand.Intn(100) // від 5 до 105
|
||||
return QueryRecipe{
|
||||
MetricIDs: s.listRandomUniqueMetricIDs(qty),
|
||||
Method: listCurrentValues,
|
||||
}
|
||||
} else {
|
||||
if metric.MetricType == diploma.Cumulative {
|
||||
num = rand.Intn(100)
|
||||
if num < s.listPeriodsProbability {
|
||||
groupBy := s.groupByOptions[rand.Intn(len(s.groupByOptions))]
|
||||
|
||||
var (
|
||||
minDays = 1
|
||||
maxDays = 7
|
||||
)
|
||||
|
||||
if groupBy == diploma.GroupByDay {
|
||||
minDays = 1
|
||||
maxDays = 30
|
||||
} else if groupBy == diploma.GroupByMonth {
|
||||
minDays = 1
|
||||
maxDays = 30
|
||||
}
|
||||
|
||||
rangeCode, since, until := s.getRandomTimeRange(
|
||||
metric.Since, metric.Until, minDays, maxDays)
|
||||
|
||||
return QueryRecipe{
|
||||
MetricID: metric.MetricID,
|
||||
Method: listCumulativePeriods,
|
||||
RangeCode: rangeCode,
|
||||
Since: uint32(since),
|
||||
Until: uint32(until),
|
||||
GroupBy: groupBy,
|
||||
}
|
||||
} else {
|
||||
var (
|
||||
minDays = 1
|
||||
maxDays = 3
|
||||
)
|
||||
|
||||
rangeCode, since, until := s.getRandomTimeRange(
|
||||
metric.Since, metric.Until, minDays, maxDays)
|
||||
|
||||
return QueryRecipe{
|
||||
MetricID: metric.MetricID,
|
||||
Method: listCumulativeMeasures,
|
||||
RangeCode: rangeCode,
|
||||
Since: uint32(since),
|
||||
Until: uint32(until),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
num = rand.Intn(100)
|
||||
if num < s.listPeriodsProbability {
|
||||
groupBy := s.groupByOptions[rand.Intn(len(s.groupByOptions))]
|
||||
|
||||
var (
|
||||
minDays = 1
|
||||
maxDays = 7
|
||||
)
|
||||
|
||||
if groupBy == diploma.GroupByDay {
|
||||
minDays = 1
|
||||
maxDays = 30
|
||||
} else if groupBy == diploma.GroupByMonth {
|
||||
minDays = 1
|
||||
maxDays = 30
|
||||
}
|
||||
|
||||
rangeCode, since, until := s.getRandomTimeRange(
|
||||
metric.Since, metric.Until, minDays, maxDays)
|
||||
|
||||
return QueryRecipe{
|
||||
MetricID: metric.MetricID,
|
||||
Method: listInstantPeriods,
|
||||
RangeCode: rangeCode,
|
||||
Since: uint32(since),
|
||||
Until: uint32(until),
|
||||
GroupBy: groupBy,
|
||||
}
|
||||
} else {
|
||||
var (
|
||||
minDays = 1
|
||||
maxDays = 3
|
||||
)
|
||||
|
||||
rangeCode, since, until := s.getRandomTimeRange(
|
||||
metric.Since, metric.Until, minDays, maxDays)
|
||||
|
||||
return QueryRecipe{
|
||||
MetricID: metric.MetricID,
|
||||
Method: listInstantMeasures,
|
||||
RangeCode: rangeCode,
|
||||
Since: uint32(since),
|
||||
Until: uint32(until),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Генерує випадковий набір унікальних metricID з [1, 100]
|
||||
func (s *RandomQueryGenerator) listRandomUniqueMetricIDs(count int) []uint32 {
|
||||
// переставляю індекси у випадковому порядку
|
||||
indexes := rand.Perm(len(s.metrics))
|
||||
// копіюю metricID із перших випадкових індексів
|
||||
metricIDs := make([]uint32, count)
|
||||
for i := range count {
|
||||
metricIDs[i] = s.metrics[indexes[i]].MetricID
|
||||
}
|
||||
return metricIDs
|
||||
}
|
||||
|
||||
const (
	secondsPerDay = 86400

	// Time-range codes; they index timeRangeProbDistribution,
	// TimeRangeCalls/ElapsedTimeByTimeRanges and rangeCodeToName.
	dayRange        = 0
	weekRange       = 1
	monthRange      = 2
	randomTimeRange = 3
)
|
||||
|
||||
// Випадковий часовий діапазон
|
||||
func (s *RandomQueryGenerator) getRandomTimeRange(start, end int64, minDays, maxDays int) (int, int64, int64) {
|
||||
var (
|
||||
since int64
|
||||
until int64
|
||||
num = rand.Intn(100)
|
||||
rangeCode int
|
||||
threshold int
|
||||
)
|
||||
for rangeCode, threshold = range s.timeRangeProbDistribution {
|
||||
if num < threshold {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
switch rangeCode {
|
||||
case dayRange:
|
||||
since = end - secondsPerDay
|
||||
until = end
|
||||
|
||||
case weekRange:
|
||||
since = end - 7*secondsPerDay
|
||||
until = end
|
||||
|
||||
case monthRange:
|
||||
since = end - 30*secondsPerDay
|
||||
until = end
|
||||
|
||||
case randomTimeRange:
|
||||
if start == end {
|
||||
return rangeCode, start, end
|
||||
}
|
||||
// Випадковий момент часу для since
|
||||
since = start + rand.Int63n(end-start)
|
||||
// Випадкова тривалість у днях (але не виходити за межу end)
|
||||
durationInDays := minDays + rand.Intn(maxDays-minDays)
|
||||
|
||||
until = since + int64(durationInDays)*secondsPerDay
|
||||
if until > end {
|
||||
until = end
|
||||
}
|
||||
}
|
||||
return rangeCode, since, until
|
||||
}
|
||||
|
||||
func (s *RandomQueryGenerator) getRandomMetric() MetricInfo {
|
||||
return s.metrics[rand.Intn(len(s.metrics))]
|
||||
}
|
||||
|
||||
// EXECUTE QUERY
|
||||
|
||||
func execQuery(conn *client.Connection, queryGenerator *RandomQueryGenerator, stat *WorkerStat) (err error) {
|
||||
recipe := queryGenerator.GetQueryRecipe()
|
||||
|
||||
var elapsedTime time.Duration
|
||||
|
||||
switch recipe.Method {
|
||||
case listCurrentValues:
|
||||
t1 := time.Now()
|
||||
_, err := conn.ListCurrentValues(recipe.MetricIDs)
|
||||
elapsedTime = time.Since(t1)
|
||||
stat.ElapsedTime += elapsedTime
|
||||
stat.Queries++
|
||||
stat.ElapsedTimeByMethods[recipe.Method] += elapsedTime
|
||||
stat.MethodCalls[recipe.Method]++
|
||||
if err != nil {
|
||||
return fmt.Errorf("ListCurrentValues: %s", err)
|
||||
}
|
||||
|
||||
case listInstantMeasures:
|
||||
t1 := time.Now()
|
||||
_, err := conn.ListInstantMeasures(proto.ListInstantMeasuresReq{
|
||||
MetricID: recipe.MetricID,
|
||||
Since: recipe.Since,
|
||||
Until: recipe.Until,
|
||||
})
|
||||
elapsedTime = time.Since(t1)
|
||||
stat.ElapsedTime += elapsedTime
|
||||
stat.Queries++
|
||||
stat.ElapsedTimeByMethods[recipe.Method] += elapsedTime
|
||||
stat.MethodCalls[recipe.Method]++
|
||||
stat.ElapsedTimeByTimeRanges[recipe.RangeCode] += elapsedTime
|
||||
stat.TimeRangeCalls[recipe.RangeCode]++
|
||||
if err != nil {
|
||||
return fmt.Errorf("ListInstantMeasures(%d): %s",
|
||||
recipe.MetricID, err)
|
||||
}
|
||||
|
||||
case listCumulativeMeasures:
|
||||
t1 := time.Now()
|
||||
_, err := conn.ListCumulativeMeasures(proto.ListCumulativeMeasuresReq{
|
||||
MetricID: recipe.MetricID,
|
||||
Since: recipe.Since,
|
||||
Until: recipe.Until,
|
||||
})
|
||||
elapsedTime = time.Since(t1)
|
||||
stat.ElapsedTime += elapsedTime
|
||||
stat.Queries++
|
||||
stat.ElapsedTimeByMethods[recipe.Method] += elapsedTime
|
||||
stat.MethodCalls[recipe.Method]++
|
||||
stat.ElapsedTimeByTimeRanges[recipe.RangeCode] += elapsedTime
|
||||
stat.TimeRangeCalls[recipe.RangeCode]++
|
||||
if err != nil {
|
||||
return fmt.Errorf("ListCumulativeMeasures(%d): %s",
|
||||
recipe.MetricID, err)
|
||||
}
|
||||
|
||||
case listInstantPeriods:
|
||||
t1 := time.Now()
|
||||
_, err := conn.ListInstantPeriods(proto.ListInstantPeriodsReq{
|
||||
MetricID: recipe.MetricID,
|
||||
Since: recipe.Since,
|
||||
Until: recipe.Until,
|
||||
GroupBy: recipe.GroupBy,
|
||||
AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
|
||||
})
|
||||
elapsedTime = time.Since(t1)
|
||||
stat.ElapsedTime += elapsedTime
|
||||
stat.Queries++
|
||||
stat.ElapsedTimeByMethods[recipe.Method] += elapsedTime
|
||||
stat.MethodCalls[recipe.Method]++
|
||||
stat.ElapsedTimeByTimeRanges[recipe.RangeCode] += elapsedTime
|
||||
stat.TimeRangeCalls[recipe.RangeCode]++
|
||||
if err != nil {
|
||||
return fmt.Errorf("ListInstantPeriods(%d): %s",
|
||||
recipe.MetricID, err)
|
||||
}
|
||||
|
||||
case listCumulativePeriods:
|
||||
t1 := time.Now()
|
||||
_, err := conn.ListCumulativePeriods(proto.ListCumulativePeriodsReq{
|
||||
MetricID: recipe.MetricID,
|
||||
Since: recipe.Since,
|
||||
Until: recipe.Until,
|
||||
GroupBy: recipe.GroupBy,
|
||||
})
|
||||
elapsedTime = time.Since(t1)
|
||||
stat.ElapsedTime += elapsedTime
|
||||
stat.Queries++
|
||||
stat.ElapsedTimeByMethods[recipe.Method] += elapsedTime
|
||||
stat.MethodCalls[recipe.Method]++
|
||||
stat.ElapsedTimeByTimeRanges[recipe.RangeCode] += elapsedTime
|
||||
stat.TimeRangeCalls[recipe.RangeCode]++
|
||||
if err != nil {
|
||||
return fmt.Errorf("ListCumulativePeriods(%d): %s",
|
||||
recipe.MetricID, err)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
261
examples/loadtest/main.go
Normal file
261
examples/loadtest/main.go
Normal file
@@ -0,0 +1,261 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gopkg.in/ini.v1"
|
||||
"gordenko.dev/dima/diploma/client"
|
||||
)
|
||||
|
||||
const (
	// Method codes; they index MethodCalls, ElapsedTimeByMethods and
	// methodCodeToName.
	listCumulativeMeasures = 0
	listCumulativePeriods  = 1
	listInstantMeasures    = 2
	listInstantPeriods     = 3
	listCurrentValues      = 4

	methodsQty    = 5 // number of method codes above
	timeRangesQty = 4 // number of time-range codes (see loadtest.go)
)

var (
	// methodCodeToName maps a method code to its printable name.
	methodCodeToName = []string{
		"listCumulativeMeasures",
		"listCumulativePeriods",
		"listInstantMeasures",
		"listInstantPeriods",
		"listCurrentValues",
	}

	// rangeCodeToName maps a time-range code to its printable name.
	rangeCodeToName = []string{
		"last day",
		"last week",
		"last month",
		"random time range",
	}
)

// WorkerStat accumulates timing statistics for one worker connection.
type WorkerStat struct {
	Queries                 int           // total requests issued
	ElapsedTime             time.Duration // total time spent in requests
	MethodCalls             []int         // per-method request counts (len methodsQty)
	ElapsedTimeByMethods    []time.Duration
	TimeRangeCalls          []int // per-range request counts (len timeRangesQty)
	ElapsedTimeByTimeRanges []time.Duration
}
|
||||
|
||||
func main() {
|
||||
var (
|
||||
iniFileName string
|
||||
)
|
||||
|
||||
flag.Usage = func() {
|
||||
fmt.Fprint(flag.CommandLine.Output(), helpMessage)
|
||||
fmt.Fprint(flag.CommandLine.Output(), configExample)
|
||||
fmt.Fprintf(flag.CommandLine.Output(), mainUsage, os.Args[0])
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
flag.StringVar(&iniFileName, "c", "loadtest.ini", "path to *.ini config file")
|
||||
flag.Parse()
|
||||
|
||||
config, err := loadConfig(iniFileName)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
metrics, err := readMetricInfo(config.MetricsInfo)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
var (
|
||||
wg = new(sync.WaitGroup)
|
||||
stats = make([]*WorkerStat, config.Connections)
|
||||
queryGenerator = NewRandomQueryGenerator(RandomQueryGeneratorOptions{
|
||||
Metrics: metrics,
|
||||
// call method probabilitites
|
||||
ListCurrentValuesProbability: 50, // current values / others
|
||||
ListPeriodsProbability: 80, // periods / measures
|
||||
// time range probabilities
|
||||
TimeRangeProbabilities: []int{
|
||||
82, // last day
|
||||
12, // last week
|
||||
3, // last month
|
||||
3, // any range
|
||||
},
|
||||
})
|
||||
)
|
||||
|
||||
for i := range stats {
|
||||
stats[i] = &WorkerStat{
|
||||
MethodCalls: make([]int, methodsQty),
|
||||
ElapsedTimeByMethods: make([]time.Duration, methodsQty),
|
||||
TimeRangeCalls: make([]int, timeRangesQty),
|
||||
ElapsedTimeByTimeRanges: make([]time.Duration, timeRangesQty),
|
||||
}
|
||||
}
|
||||
|
||||
t1 := time.Now()
|
||||
|
||||
for i := range config.Connections {
|
||||
wg.Add(1)
|
||||
go func(stat *WorkerStat) {
|
||||
defer wg.Done()
|
||||
|
||||
conn, err := client.Connect(config.DatabaseAddr)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
for range config.RequestsPerConn {
|
||||
err := execQuery(conn, queryGenerator, stat)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
}(stats[i])
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
testingTime := time.Since(t1)
|
||||
|
||||
var (
|
||||
methodCalls = make([]int, methodsQty)
|
||||
elapsedTimeByMethods = make([]time.Duration, methodsQty)
|
||||
timeRangeCalls = make([]int, timeRangesQty)
|
||||
elapsedTimeByTimeRanges = make([]time.Duration, timeRangesQty)
|
||||
totalElapsedTime time.Duration
|
||||
totalQueries int
|
||||
avgTimePerQuery time.Duration
|
||||
rps float64
|
||||
)
|
||||
|
||||
for _, stat := range stats {
|
||||
totalElapsedTime += stat.ElapsedTime
|
||||
totalQueries += stat.Queries
|
||||
|
||||
for i, elapsedTime := range stat.ElapsedTimeByMethods {
|
||||
elapsedTimeByMethods[i] += elapsedTime
|
||||
}
|
||||
for i, qty := range stat.MethodCalls {
|
||||
methodCalls[i] += qty
|
||||
}
|
||||
for i, elapsedTime := range stat.ElapsedTimeByTimeRanges {
|
||||
elapsedTimeByTimeRanges[i] += elapsedTime
|
||||
}
|
||||
for i, qty := range stat.TimeRangeCalls {
|
||||
timeRangeCalls[i] += qty
|
||||
}
|
||||
}
|
||||
|
||||
avgTimePerQuery = totalElapsedTime / time.Duration(totalQueries)
|
||||
rps = float64(config.Connections*config.RequestsPerConn) / testingTime.Seconds()
|
||||
|
||||
fmt.Printf(`TEST RESULTS:
|
||||
Time: %.0f seconds
|
||||
Connections: %d
|
||||
Requests per conn: %d
|
||||
Total requests: %d
|
||||
AVG request time: %v
|
||||
RPS: %d
|
||||
|
||||
`,
|
||||
testingTime.Seconds(), config.Connections, config.RequestsPerConn,
|
||||
totalQueries, avgTimePerQuery, int(rps))
|
||||
|
||||
for i, calls := range methodCalls {
|
||||
totalElapsedTimeByMethod := elapsedTimeByMethods[i]
|
||||
|
||||
methodPercent := float64(calls*100) / float64(totalQueries)
|
||||
|
||||
fmt.Printf("%s: %d (%.1f%%), AVG request time: %v\n",
|
||||
methodCodeToName[i], calls, methodPercent,
|
||||
totalElapsedTimeByMethod/time.Duration(calls))
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
|
||||
for i, calls := range timeRangeCalls {
|
||||
totalElapsedTimeByTimeRange := elapsedTimeByTimeRanges[i]
|
||||
|
||||
timeRangePercent := float64(calls*100) / float64(totalQueries-methodCalls[listCurrentValues])
|
||||
|
||||
fmt.Printf("%s: %d (%.1f%%), AVG request time: %v\n",
|
||||
rangeCodeToName[i], calls, timeRangePercent,
|
||||
totalElapsedTimeByTimeRange/time.Duration(calls))
|
||||
}
|
||||
}
|
||||
|
||||
// CONFIG FILE

// mainUsage is the tail of the flag.Usage output; %s receives os.Args[0].
const mainUsage = `Usage:
  %s -c path/to/config.ini

`

// helpMessage is the banner printed first by flag.Usage.
const helpMessage = `Diploma project. Load test. Version: 1.0
created by Dmytro Gordenko, 1.e4.kc6@gmail.com
`

// configExample shows a sample loadtest.ini in the flag.Usage output.
const configExample = `
loadtest.ini example:

databaseAddr = :12345
metricsInfo = ../../datadir/metrics.info
connections = 1000
requestsPerConn = 500

`

// Config holds the load-test options parsed from the INI file.
type Config struct {
	DatabaseAddr    string // host:port of the database server
	MetricsInfo     string // path to the metrics-info JSON file
	Connections     int    // number of concurrent client connections (required key)
	RequestsPerConn int    // requests issued by each connection (required key)
}
|
||||
|
||||
func (s Config) String() string {
|
||||
return fmt.Sprintf(`starting options:
|
||||
databaseAddr = %s
|
||||
metricsInfo = %s
|
||||
connections = %d
|
||||
requestsPerConn = %d
|
||||
`,
|
||||
s.DatabaseAddr, s.MetricsInfo, s.Connections, s.RequestsPerConn)
|
||||
}
|
||||
|
||||
func loadConfig(iniFileName string) (_ Config, err error) {
|
||||
file, err := ini.Load(iniFileName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
conf := Config{}
|
||||
|
||||
top := file.Section("")
|
||||
|
||||
conf.DatabaseAddr = top.Key("databaseAddr").String()
|
||||
conf.MetricsInfo = top.Key("metricsInfo").String()
|
||||
|
||||
conf.Connections, err = top.Key("connections").Int()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("'connections' option is required in config file")
|
||||
return
|
||||
}
|
||||
conf.RequestsPerConn, err = top.Key("requestsPerConn").Int()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("'requestsPerConn' option is required in config file")
|
||||
return
|
||||
}
|
||||
return conf, nil
|
||||
}
|
||||
81
examples/requests/generate.go
Normal file
81
examples/requests/generate.go
Normal file
@@ -0,0 +1,81 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"gordenko.dev/dima/diploma/client"
|
||||
)
|
||||
|
||||
func GenerateCumulativeMeasures(days int) []client.Measure {
|
||||
var (
|
||||
measures []client.Measure
|
||||
minutes = []int{14, 29, 44, 59}
|
||||
hoursPerDay = 24
|
||||
totalHours = days * hoursPerDay
|
||||
since = time.Now().AddDate(0, 0, -days)
|
||||
totalValue float64
|
||||
)
|
||||
|
||||
for i := range totalHours {
|
||||
hourTime := since.Add(time.Duration(i) * time.Hour)
|
||||
for _, m := range minutes {
|
||||
measureTime := time.Date(
|
||||
hourTime.Year(),
|
||||
hourTime.Month(),
|
||||
hourTime.Day(),
|
||||
hourTime.Hour(),
|
||||
m, // minutes
|
||||
0, // seconds
|
||||
0, // nanoseconds
|
||||
time.Local,
|
||||
)
|
||||
|
||||
measure := client.Measure{
|
||||
Timestamp: uint32(measureTime.Unix()),
|
||||
Value: totalValue,
|
||||
}
|
||||
measures = append(measures, measure)
|
||||
|
||||
totalValue += rand.Float64()
|
||||
}
|
||||
}
|
||||
return measures
|
||||
}
|
||||
|
||||
func GenerateInstantMeasures(days int, baseValue float64) []client.Measure {
|
||||
var (
|
||||
measures []client.Measure
|
||||
minutes = []int{14, 29, 44, 59}
|
||||
hoursPerDay = 24
|
||||
totalHours = days * hoursPerDay
|
||||
since = time.Now().AddDate(0, 0, -days)
|
||||
)
|
||||
|
||||
for i := range totalHours {
|
||||
hourTime := since.Add(time.Duration(i) * time.Hour)
|
||||
for _, m := range minutes {
|
||||
measureTime := time.Date(
|
||||
hourTime.Year(),
|
||||
hourTime.Month(),
|
||||
hourTime.Day(),
|
||||
hourTime.Hour(),
|
||||
m, // minutes
|
||||
0, // seconds
|
||||
0, // nanoseconds
|
||||
time.Local,
|
||||
)
|
||||
|
||||
// value = +-10% from base value
|
||||
fluctuation := baseValue * 0.1
|
||||
value := baseValue + (rand.Float64()*2-1)*fluctuation
|
||||
|
||||
measure := client.Measure{
|
||||
Timestamp: uint32(measureTime.Unix()),
|
||||
Value: value,
|
||||
}
|
||||
measures = append(measures, measure)
|
||||
}
|
||||
}
|
||||
return measures
|
||||
}
|
||||
90
examples/requests/main.go
Normal file
90
examples/requests/main.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"gopkg.in/ini.v1"
|
||||
"gordenko.dev/dima/diploma/client"
|
||||
)
|
||||
|
||||
var (
	// metricTypeToName maps a diploma.MetricType value to a printable
	// name; index 0 is the zero value and prints as an empty string.
	metricTypeToName = []string{
		"",
		"cumulative",
		"instant",
	}
)
|
||||
|
||||
func main() {
|
||||
var (
|
||||
iniFileName string
|
||||
)
|
||||
|
||||
flag.Usage = func() {
|
||||
fmt.Fprint(flag.CommandLine.Output(), helpMessage)
|
||||
fmt.Fprint(flag.CommandLine.Output(), configExample)
|
||||
fmt.Fprintf(flag.CommandLine.Output(), mainUsage, os.Args[0])
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
flag.StringVar(&iniFileName, "c", "requests.ini", "path to *.ini config file")
|
||||
flag.Parse()
|
||||
|
||||
config, err := loadConfig(iniFileName)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
conn, err := client.Connect(config.DatabaseAddr)
|
||||
if err != nil {
|
||||
log.Fatalf("client.Connect(%s): %s\n", config.DatabaseAddr, err)
|
||||
} else {
|
||||
fmt.Println("Connected to database")
|
||||
}
|
||||
|
||||
sendRequests(conn)
|
||||
}
|
||||
|
||||
// CONFIG FILE

// mainUsage is the tail of the flag.Usage output; %s receives os.Args[0].
const mainUsage = `Usage:
  %s -c path/to/config.ini

`

// helpMessage is the banner printed first by flag.Usage.
const helpMessage = `Diploma project. Example requests. Version: 1.0
created by Dmytro Gordenko, 1.e4.kc6@gmail.com
`

// configExample shows a sample requests.ini in the flag.Usage output.
const configExample = `
requests.ini example:

databaseAddr = :12345

`

// Config holds the options parsed from the INI file.
type Config struct {
	DatabaseAddr string // host:port of the database server
}

// String renders the loaded options for startup logging.
func (s Config) String() string {
	return fmt.Sprintf(`starting options:
databaseAddr = %s
`,
		s.DatabaseAddr)
}
|
||||
|
||||
func loadConfig(iniFileName string) (_ Config, err error) {
|
||||
file, err := ini.Load(iniFileName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
conf := Config{}
|
||||
top := file.Section("")
|
||||
|
||||
conf.DatabaseAddr = top.Key("databaseAddr").String()
|
||||
return conf, nil
|
||||
}
|
||||
361
examples/requests/requests.go
Normal file
361
examples/requests/requests.go
Normal file
@@ -0,0 +1,361 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"gordenko.dev/dima/diploma"
|
||||
"gordenko.dev/dima/diploma/client"
|
||||
"gordenko.dev/dima/diploma/proto"
|
||||
)
|
||||
|
||||
func sendRequests(conn *client.Connection) {
|
||||
var (
|
||||
instantMetricID uint32 = 10000
|
||||
cumulativeMetricID uint32 = 10001
|
||||
fracDigits byte = 2
|
||||
err error
|
||||
)
|
||||
|
||||
conn.DeleteMetric(instantMetricID)
|
||||
conn.DeleteMetric(cumulativeMetricID)
|
||||
|
||||
// ADD INSTANT METRIC
|
||||
|
||||
err = conn.AddMetric(client.Metric{
|
||||
MetricID: instantMetricID,
|
||||
MetricType: diploma.Instant,
|
||||
FracDigits: fracDigits,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.AddMetric: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nInstant metric %d added\n", instantMetricID)
|
||||
}
|
||||
|
||||
// GET INSTANT METRIC
|
||||
|
||||
iMetric, err := conn.GetMetric(instantMetricID)
|
||||
if err != nil {
|
||||
log.Fatalf("conn.GetMetric: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf(`
|
||||
GetMetric:
|
||||
metricID: %d
|
||||
metricType: %s
|
||||
fracDigits: %d
|
||||
`,
|
||||
iMetric.MetricID, metricTypeToName[iMetric.MetricType], fracDigits)
|
||||
}
|
||||
|
||||
// APPEND MEASURES
|
||||
|
||||
instantMeasures := GenerateInstantMeasures(62, 220)
|
||||
|
||||
err = conn.AppendMeasures(client.AppendMeasuresReq{
|
||||
MetricID: instantMetricID,
|
||||
Measures: instantMeasures,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.AppendMeasures: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nAppended %d measures for the metric %d\n",
|
||||
len(instantMeasures), instantMetricID)
|
||||
}
|
||||
|
||||
// LIST INSTANT MEASURES
|
||||
|
||||
lastTimestamp := instantMeasures[len(instantMeasures)-1].Timestamp
|
||||
until := time.Unix(int64(lastTimestamp), 0)
|
||||
since := until.Add(-5 * time.Hour)
|
||||
|
||||
instantList, err := conn.ListInstantMeasures(proto.ListInstantMeasuresReq{
|
||||
MetricID: instantMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListInstantMeasures: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListInstantMeasures %s - %s:\n",
|
||||
formatTime(uint32(since.Unix())), formatTime(uint32(until.Unix())))
|
||||
for _, item := range instantList {
|
||||
fmt.Printf(" %s => %.2f\n", formatTime(item.Timestamp), item.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST ALL INSTANT MEASURES
|
||||
|
||||
instantList, err = conn.ListAllInstantMeasures(instantMetricID)
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListAllInstantMeasures: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListAllInstantMeasures (last 15 items):\n")
|
||||
for _, item := range instantList[:15] {
|
||||
fmt.Printf(" %s => %.2f\n", formatTime(item.Timestamp), item.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST INSTANT PERIODS (group by hour)
|
||||
|
||||
until = time.Unix(int64(lastTimestamp+1), 0)
|
||||
since = until.Add(-24 * time.Hour)
|
||||
|
||||
instantPeriods, err := conn.ListInstantPeriods(proto.ListInstantPeriodsReq{
|
||||
MetricID: instantMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
GroupBy: diploma.GroupByHour,
|
||||
AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListInstantPeriods: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListInstantPeriods (1 day, group by hour):\n")
|
||||
for _, item := range instantPeriods {
|
||||
fmt.Printf(" %s => min %.2f, max %.2f, avg %.2f\n", formatHourPeriod(item.Period), item.Min, item.Max, item.Avg)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST INSTANT PERIODS (group by day)
|
||||
|
||||
until = time.Unix(int64(lastTimestamp+1), 0)
|
||||
since = until.AddDate(0, 0, -7)
|
||||
|
||||
instantPeriods, err = conn.ListInstantPeriods(proto.ListInstantPeriodsReq{
|
||||
MetricID: instantMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
GroupBy: diploma.GroupByDay,
|
||||
AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListInstantPeriods: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListInstantPeriods (7 days, group by day):\n")
|
||||
for _, item := range instantPeriods {
|
||||
fmt.Printf(" %s => min %.2f, max %.2f, avg %.2f\n", formatDayPeriod(item.Period), item.Min, item.Max, item.Avg)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST INSTANT PERIODS (group by month)
|
||||
|
||||
until = time.Unix(int64(lastTimestamp+1), 0)
|
||||
since = until.AddDate(0, 0, -62)
|
||||
|
||||
instantPeriods, err = conn.ListInstantPeriods(proto.ListInstantPeriodsReq{
|
||||
MetricID: instantMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
GroupBy: diploma.GroupByMonth,
|
||||
AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListInstantPeriods: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListInstantPeriods (62 days, group by month):\n")
|
||||
for _, item := range instantPeriods {
|
||||
fmt.Printf(" %s => min %.2f, max %.2f, avg %.2f\n", formatMonthPeriod(item.Period), item.Min, item.Max, item.Avg)
|
||||
}
|
||||
}
|
||||
|
||||
// DELETE INSTANT METRIC MEASURES
|
||||
|
||||
err = conn.DeleteMeasures(proto.DeleteMeasuresReq{
|
||||
MetricID: instantMetricID,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.DeleteMeasures: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nInstant metric %d measures deleted\n", instantMetricID)
|
||||
}
|
||||
|
||||
// DELETE INSTANT METRIC
|
||||
|
||||
err = conn.DeleteMetric(instantMetricID)
|
||||
if err != nil {
|
||||
log.Fatalf("conn.DeleteMetric: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nInstant metric %d deleted\n", instantMetricID)
|
||||
}
|
||||
|
||||
// ADD CUMULATIVE METRIC
|
||||
|
||||
err = conn.AddMetric(client.Metric{
|
||||
MetricID: cumulativeMetricID,
|
||||
MetricType: diploma.Cumulative,
|
||||
FracDigits: fracDigits,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.AddMetric: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nCumulative metric %d added\n", cumulativeMetricID)
|
||||
}
|
||||
|
||||
// GET CUMULATIVE METRIC
|
||||
|
||||
cMetric, err := conn.GetMetric(cumulativeMetricID)
|
||||
if err != nil {
|
||||
log.Fatalf("conn.GetMetric: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf(`
|
||||
GetMetric:
|
||||
metricID: %d
|
||||
metricType: %s
|
||||
fracDigits: %d
|
||||
`,
|
||||
cMetric.MetricID, metricTypeToName[cMetric.MetricType], fracDigits)
|
||||
}
|
||||
|
||||
// APPEND MEASURES
|
||||
|
||||
cumulativeMeasures := GenerateCumulativeMeasures(62)
|
||||
|
||||
err = conn.AppendMeasures(client.AppendMeasuresReq{
|
||||
MetricID: cumulativeMetricID,
|
||||
Measures: cumulativeMeasures,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.AppendMeasures: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nAppended %d measures for the metric %d\n",
|
||||
len(cumulativeMeasures), cumulativeMetricID)
|
||||
}
|
||||
|
||||
// LIST CUMULATIVE MEASURES
|
||||
|
||||
lastTimestamp = cumulativeMeasures[len(cumulativeMeasures)-1].Timestamp
|
||||
until = time.Unix(int64(lastTimestamp), 0)
|
||||
since = until.Add(-5 * time.Hour)
|
||||
|
||||
cumulativeList, err := conn.ListCumulativeMeasures(proto.ListCumulativeMeasuresReq{
|
||||
MetricID: cumulativeMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListCumulativeMeasures: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListCumulativeMeasures %s - %s:\n",
|
||||
formatTime(uint32(since.Unix())), formatTime(uint32(until.Unix())))
|
||||
|
||||
for _, item := range cumulativeList {
|
||||
fmt.Printf(" %s => %.2f\n", formatTime(item.Timestamp), item.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST ALL CUMULATIVE MEASURES
|
||||
|
||||
cumulativeList, err = conn.ListAllCumulativeMeasures(cumulativeMetricID)
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListAllCumulativeMeasures: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListAllCumulativeMeasures (last 15 items):\n")
|
||||
for _, item := range cumulativeList[:15] {
|
||||
fmt.Printf(" %s => %.2f\n", formatTime(item.Timestamp), item.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST CUMULATIVE PERIODS (group by hour)
|
||||
|
||||
until = time.Unix(int64(lastTimestamp+1), 0)
|
||||
since = until.Add(-24 * time.Hour)
|
||||
|
||||
cumulativePeriods, err := conn.ListCumulativePeriods(proto.ListCumulativePeriodsReq{
|
||||
MetricID: cumulativeMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
GroupBy: diploma.GroupByHour,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListCumulativePeriods: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListCumulativePeriods (1 day, group by hour):\n")
|
||||
for _, item := range cumulativePeriods {
|
||||
fmt.Printf(" %s => end value %.2f, total %.2f\n", formatHourPeriod(item.Period), item.EndValue, item.Total)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST CUMULATIVE PERIODS (group by day)
|
||||
|
||||
until = time.Unix(int64(lastTimestamp+1), 0)
|
||||
since = until.AddDate(0, 0, -7)
|
||||
|
||||
cumulativePeriods, err = conn.ListCumulativePeriods(proto.ListCumulativePeriodsReq{
|
||||
MetricID: cumulativeMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
GroupBy: diploma.GroupByDay,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListCumulativePeriods: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListCumulativePeriods (7 days, group by day):\n")
|
||||
for _, item := range cumulativePeriods {
|
||||
fmt.Printf(" %s => end value %.2f, total %.2f\n", formatDayPeriod(item.Period), item.EndValue, item.Total)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST CUMULATIVE PERIODS (group by day)
|
||||
|
||||
until = time.Unix(int64(lastTimestamp+1), 0)
|
||||
since = until.AddDate(0, 0, -62)
|
||||
|
||||
cumulativePeriods, err = conn.ListCumulativePeriods(proto.ListCumulativePeriodsReq{
|
||||
MetricID: cumulativeMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
GroupBy: diploma.GroupByMonth,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListCumulativePeriods: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListCumulativePeriods (62 days, group by month):\n")
|
||||
for _, item := range cumulativePeriods {
|
||||
fmt.Printf(" %s => end value %.2f, total %.2f\n", formatMonthPeriod(item.Period), item.EndValue, item.Total)
|
||||
}
|
||||
}
|
||||
|
||||
// DELETE CUMULATIVE METRIC MEASURES
|
||||
|
||||
err = conn.DeleteMeasures(proto.DeleteMeasuresReq{
|
||||
MetricID: cumulativeMetricID,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.DeleteMeasures: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nCumulative metric %d measures deleted\n", cumulativeMetricID)
|
||||
}
|
||||
|
||||
// DELETE CUMULATIVE METRIC
|
||||
|
||||
err = conn.DeleteMetric(cumulativeMetricID)
|
||||
if err != nil {
|
||||
log.Fatalf("conn.DeleteMetric: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nCumulative metric %d deleted\n", cumulativeMetricID)
|
||||
}
|
||||
}
|
||||
|
||||
// datetimeLayout is the Go reference layout used to render measure timestamps.
const datetimeLayout = "2006-01-02 15:04:05"
|
||||
|
||||
func formatTime(timestamp uint32) string {
|
||||
tm := time.Unix(int64(timestamp), 0)
|
||||
return tm.Format(datetimeLayout)
|
||||
}
|
||||
|
||||
// formatHourPeriod renders the start timestamp of an hour bucket as a
// closed range label, e.g. "2024-05-01 13:00 - 13:59" (local time zone).
// Note: the "00" and ":59" parts are literal text, only the date and the
// hour come from the timestamp.
func formatHourPeriod(period uint32) string {
	start := time.Unix(int64(period), 0)
	label := start.Format("2006-01-02 15:00 - 15")
	return label + ":59"
}
|
||||
|
||||
// formatDayPeriod renders the start timestamp of a day bucket as a
// calendar date, e.g. "2024-05-01" (local time zone).
func formatDayPeriod(period uint32) string {
	return time.Unix(int64(period), 0).Format("2006-01-02")
}
|
||||
|
||||
// formatMonthPeriod renders the start timestamp of a month bucket as a
// year-month label, e.g. "2024-05" (local time zone).
func formatMonthPeriod(period uint32) string {
	return time.Unix(int64(period), 0).Format("2006-01")
}
|
||||
Reference in New Issue
Block a user