rc2
@@ -1,261 +0,0 @@
package atree

import (
    "fmt"
    "time"

    "gordenko.dev/dima/diploma"
    "gordenko.dev/dima/diploma/timeutil"
)

// AGGREGATE

type InstantAggregator struct {
    firstHourOfDay int
    time2period    func(uint32) uint32
    currentPeriod  uint32
    since          uint32
    until          uint32
    min            float64
    max            float64
    total          float64
    entries        int
}

type InstantAggregatorOptions struct {
    GroupBy        diploma.GroupBy
    FirstHourOfDay int
}

func NewInstantAggregator(opt InstantAggregatorOptions) (*InstantAggregator, error) {
    s := &InstantAggregator{
        firstHourOfDay: opt.FirstHourOfDay,
    }

    switch opt.GroupBy {
    case diploma.GroupByHour:
        s.time2period = groupByHour

    case diploma.GroupByDay:
        if s.firstHourOfDay > 0 {
            s.time2period = s.groupByDayUsingFHD
        } else {
            s.time2period = groupByDay
        }

    case diploma.GroupByMonth:
        if s.firstHourOfDay > 0 {
            s.time2period = s.groupByMonthUsingFHD
        } else {
            s.time2period = groupByMonth
        }

    default:
        return nil, fmt.Errorf("unknown groupBy %d option", opt.GroupBy)
    }

    return s, nil
}

// Data arrives from newest to oldest, so Until is received first.
// Returns a period-complete flag.
func (s *InstantAggregator) Feed(timestamp uint32, value float64, p *InstantPeriod) bool {
    period := s.time2period(timestamp)
    //fmt.Printf("feed: %s %v, period: %s\n", time.Unix(int64(timestamp), 0), value, time.Unix(int64(period), 0))
    if s.entries == 0 {
        s.currentPeriod = period
        s.since = timestamp
        s.until = timestamp
        s.min = value
        s.max = value
        s.total = value
        s.entries = 1
        return false
    }

    if period != s.currentPeriod {
        // the accumulated period is complete
        s.FillPeriod(timestamp, p)
        s.currentPeriod = period
        s.since = timestamp
        s.until = timestamp
        s.min = value
        s.max = value
        s.total = value
        s.entries = 1
        return true
    }

    if value < s.min {
        s.min = value
    } else if value > s.max {
        s.max = value
    }
    // accumulate for AVG
    s.total += value
    s.entries++
    // start of the period
    s.since = timestamp
    return false
}

func (s *InstantAggregator) FillPeriod(prevTimestamp uint32, p *InstantPeriod) bool {
    if s.entries == 0 {
        return false
    }

    //fmt.Printf("FillPeriod: %s, prevTimestamp: %s\n", time.Unix(int64(s.currentPeriod), 0), time.Unix(int64(prevTimestamp), 0))
    p.Period = s.currentPeriod
    if prevTimestamp > 0 {
        p.Since = prevTimestamp
    } else {
        p.Since = s.since
    }
    p.Until = s.until
    p.Min = s.min
    p.Max = s.max
    p.Avg = s.total / float64(s.entries)
    return true
}

func (s *InstantAggregator) groupByDayUsingFHD(timestamp uint32) uint32 {
    tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "d")
    if tm.Hour() < s.firstHourOfDay {
        tm = tm.AddDate(0, 0, -1)
    }
    return uint32(tm.Unix())
}

func (s *InstantAggregator) groupByMonthUsingFHD(timestamp uint32) uint32 {
    tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
    if tm.Hour() < s.firstHourOfDay {
        tm = tm.AddDate(0, 0, -1)
    }
    return uint32(tm.Unix())
}

// CUMULATIVE

type CumulativeAggregator struct {
    firstHourOfDay int
    time2period    func(uint32) uint32
    currentPeriod  uint32
    since          uint32
    until          uint32
    sinceValue     float64
    untilValue     float64
    entries        int
}

type CumulativeAggregatorOptions struct {
    GroupBy        diploma.GroupBy
    FirstHourOfDay int
}

func NewCumulativeAggregator(opt CumulativeAggregatorOptions) (*CumulativeAggregator, error) {
    s := &CumulativeAggregator{
        firstHourOfDay: opt.FirstHourOfDay,
    }

    switch opt.GroupBy {
    case diploma.GroupByHour:
        s.time2period = groupByHour

    case diploma.GroupByDay:
        if s.firstHourOfDay > 0 {
            s.time2period = s.groupByDayUsingFHD
        } else {
            s.time2period = groupByDay
        }

    case diploma.GroupByMonth:
        if s.firstHourOfDay > 0 {
            s.time2period = s.groupByMonthUsingFHD
        } else {
            s.time2period = groupByMonth
        }

    default:
        return nil, fmt.Errorf("unknown groupBy %d option", opt.GroupBy)
    }

    return s, nil
}

// Returns a period-complete flag.
func (s *CumulativeAggregator) Feed(timestamp uint32, value float64, p *CumulativePeriod) bool {
    period := s.time2period(timestamp)
    if s.entries == 0 {
        s.currentPeriod = period
        s.since = timestamp
        s.until = timestamp
        s.sinceValue = value
        s.untilValue = value
        s.entries = 1
        return false
    }

    if period != s.currentPeriod {
        // the accumulated period is complete
        s.FillPeriod(timestamp, value, p)
        s.currentPeriod = period
        s.since = timestamp
        s.until = timestamp
        s.sinceValue = value
        s.untilValue = value
        s.entries = 1
        return true
    }

    // start of the period
    s.since = timestamp
    s.sinceValue = value
    s.entries++
    return false
}

func (s *CumulativeAggregator) FillPeriod(prevTimestamp uint32, value float64, p *CumulativePeriod) bool {
    if s.entries == 0 {
        return false
    }
    p.Period = s.currentPeriod
    if prevTimestamp > 0 {
        p.Since = prevTimestamp
        p.Total = s.untilValue - value
    } else {
        p.Since = s.since
        p.Total = s.untilValue - s.sinceValue
    }
    p.Until = s.until
    p.EndValue = s.untilValue

    return true
}

func (s *CumulativeAggregator) groupByDayUsingFHD(timestamp uint32) uint32 {
    tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "d")
    if tm.Hour() < s.firstHourOfDay {
        tm = tm.AddDate(0, 0, -1)
    }
    return uint32(tm.Unix())
}

func (s *CumulativeAggregator) groupByMonthUsingFHD(timestamp uint32) uint32 {
    tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
    if tm.Hour() < s.firstHourOfDay {
        tm = tm.AddDate(0, 0, -1)
    }
    return uint32(tm.Unix())
}

func groupByHour(timestamp uint32) uint32 {
    return uint32(timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "h").Unix())
}

func groupByDay(timestamp uint32) uint32 {
    return uint32(timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "d").Unix())
}

func groupByMonth(timestamp uint32) uint32 {
    return uint32(timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m").Unix())
}
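A minimal usage sketch of the removed InstantAggregator, for reference only (not part of the commit; the measures slice and the emit callback are placeholders). The backward cursor in atree/select.go drives it the same way, newest measure first:

    agg, err := NewInstantAggregator(InstantAggregatorOptions{
        GroupBy: diploma.GroupByDay,
    })
    if err != nil {
        return err
    }
    var period InstantPeriod
    // measures is assumed to be sorted from newest to oldest.
    for _, m := range measures {
        if agg.Feed(m.Timestamp, m.Value, &period) {
            emit(period) // a period boundary was crossed; period holds the finished period
        }
    }
    // Flush the final (oldest) period; 0 means "no boundary timestamp".
    if agg.FillPeriod(0, &period) {
        emit(period)
    }

The CumulativeAggregator is used the same way, except that Feed and FillPeriod also take the measure value so Total can be computed as the difference between the period's end and start values.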
@@ -378,10 +378,6 @@ func (s *Atree) AppendDataPage(req AppendDataPageReq) (_ redo.Report, err error)
        return
    }

    // At this point the scheme is as follows: all pages, data and index, are pinned in the
    // cache, so the write to disk completes as quickly as possible. Afterwards the
    // ReferenceCount of each page is decremented by 1. Since an XLock is held on the metric,
    // the pages have ReferenceCount = 1 (no other readers).
    waitCh := make(chan struct{})

    task := WriteTask{
atree/io.go
@@ -8,7 +8,7 @@ import (
    "math"
    "os"

    octopus "gordenko.dev/dima/diploma"
    diploma "gordenko.dev/dima/diploma"
    "gordenko.dev/dima/diploma/atree/redo"
    "gordenko.dev/dima/diploma/bin"
)
@@ -74,8 +74,8 @@ func (s *Atree) releaseIndexPage(pageNo uint32) {
        p.ReferenceCount--
        return
    } else {
        octopus.Abort(
            octopus.ReferenceCountBug,
        diploma.Abort(
            diploma.ReferenceCountBug,
            fmt.Errorf("call releaseIndexPage on page %d with reference count = %d",
                pageNo, p.ReferenceCount),
        )
@@ -98,7 +98,7 @@ func (s *Atree) allocIndexPage() AllocatedPage {
    } else {
        s.mutex.Lock()
        if s.allocatedIndexPagesQty == math.MaxUint32 {
            octopus.Abort(octopus.MaxAtreeSizeExceeded,
            diploma.Abort(diploma.MaxAtreeSizeExceeded,
                errors.New("no space in Atree index"))
        }
        s.allocatedIndexPagesQty++
@@ -163,8 +163,8 @@ func (s *Atree) releaseDataPage(pageNo uint32) {
        p.ReferenceCount--
        return
    } else {
        octopus.Abort(
            octopus.ReferenceCountBug,
        diploma.Abort(
            diploma.ReferenceCountBug,
            fmt.Errorf("call releaseDataPage on page %d with reference count = %d",
                pageNo, p.ReferenceCount),
        )
@@ -186,7 +186,7 @@ func (s *Atree) allocDataPage() AllocatedPage {
    } else {
        s.mutex.Lock()
        if s.allocatedDataPagesQty == math.MaxUint32 {
            octopus.Abort(octopus.MaxAtreeSizeExceeded,
            diploma.Abort(diploma.MaxAtreeSizeExceeded,
                errors.New("no space in Atree index"))
        }
        s.allocatedDataPagesQty++
@@ -303,7 +303,7 @@ func (s *Atree) pageWriter() {
        case <-s.writeSignalCh:
            err := s.writeTasks()
            if err != nil {
                octopus.Abort(octopus.WriteToAtreeFailed, err)
                diploma.Abort(diploma.WriteToAtreeFailed, err)
            }
        }
    }
atree/select.go
@@ -3,81 +3,17 @@ package atree
import (
    "fmt"

    octopus "gordenko.dev/dima/diploma"
    "gordenko.dev/dima/diploma"
)

type IterateAllCumulativeByTreeCursorReq struct {
type ContinueFullScanReq struct {
    MetricType     diploma.MetricType
    FracDigits     byte
    PageNo         uint32
    EndTimestamp   uint32
    EndValue       float64
    ResponseWriter *CumulativeMeasureWriter
}

func (s *Atree) IterateAllCumulativeByTreeCursor(req IterateAllCumulativeByTreeCursorReq) error {
    buf, err := s.fetchDataPage(req.PageNo)
    if err != nil {
        return err
    }

    treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
        PageNo:     req.PageNo,
        PageData:   buf,
        Atree:      s,
        FracDigits: req.FracDigits,
        MetricType: octopus.Cumulative,
    })
    if err != nil {
        return err
    }
    defer treeCursor.Close()

    var (
        endTimestamp = req.EndTimestamp
        endValue     = req.EndValue
    )

    for {
        timestamp, value, done, err := treeCursor.Prev()
        if err != nil {
            return err
        }

        if done {
            err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
                Timestamp: endTimestamp,
                Value:     endValue,
                Total:     endValue,
            })
            if err != nil {
                return err
            }
            return nil
        }
        err = req.ResponseWriter.WriteMeasure(CumulativeMeasure{
            Timestamp: endTimestamp,
            Value:     endValue,
            Total:     endValue - value,
        })
        if err != nil {
            return err
        }
        endTimestamp = timestamp
        endValue = value
    }
}

type ContinueIterateCumulativeByTreeCursorReq struct {
    FracDigits     byte
    Since          uint32
    Until          uint32
    ResponseWriter diploma.AtreeMeasureConsumer
    LastPageNo     uint32
    EndTimestamp   uint32
    EndValue       float64
    ResponseWriter *CumulativeMeasureWriter
}

func (s *Atree) ContinueIterateCumulativeByTreeCursor(req ContinueIterateCumulativeByTreeCursorReq) error {
func (s *Atree) ContinueFullScan(req ContinueFullScanReq) error {
    buf, err := s.fetchDataPage(req.LastPageNo)
    if err != nil {
        return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
@@ -88,528 +24,110 @@ func (s *Atree) ContinueIterateCumulativeByTreeCursor(req ContinueIterateCumulat
        PageData:   buf,
        Atree:      s,
        FracDigits: req.FracDigits,
        MetricType: octopus.Cumulative,
        MetricType: req.MetricType,
    })
    if err != nil {
        return err
    }
    defer treeCursor.Close()

    var (
        endTimestamp = req.EndTimestamp
        endValue     = req.EndValue
    )
    for {
        timestamp, value, done, err := treeCursor.Prev()
        if err != nil {
            return err
        }
        if done {
            return nil
        }
        req.ResponseWriter.Feed(timestamp, value)
    }
}

type ContinueRangeScanReq struct {
    MetricType     diploma.MetricType
    FracDigits     byte
    ResponseWriter diploma.AtreeMeasureConsumer
    LastPageNo     uint32
    Since          uint32
}

func (s *Atree) ContinueRangeScan(req ContinueRangeScanReq) error {
    buf, err := s.fetchDataPage(req.LastPageNo)
    if err != nil {
        return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
    }

    treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
        PageNo:     req.LastPageNo,
        PageData:   buf,
        Atree:      s,
        FracDigits: req.FracDigits,
        MetricType: req.MetricType,
    })
    if err != nil {
        return err
    }
    defer treeCursor.Close()

    for {
        timestamp, value, done, err := treeCursor.Prev()
        if err != nil {
            return err
        }

        if done {
            err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
                Timestamp: endTimestamp,
                Value:     endValue,
                Total:     endValue,
            })
            if err != nil {
                return err
            }
            return nil
        }
        req.ResponseWriter.Feed(timestamp, value)
        if timestamp < req.Since {
            return nil
        }
    }
}

type RangeScanReq struct {
    MetricType     diploma.MetricType
    FracDigits     byte
    ResponseWriter diploma.AtreeMeasureConsumer
    RootPageNo     uint32
    Since          uint32
    Until          uint32
}

func (s *Atree) RangeScan(req RangeScanReq) error {
    pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
    if err != nil {
        return err
    }

    cursor, err := NewBackwardCursor(BackwardCursorOptions{
        PageNo:     pageNo,
        PageData:   buf,
        Atree:      s,
        FracDigits: req.FracDigits,
        MetricType: req.MetricType,
    })
    if err != nil {
        return err
    }
    defer cursor.Close()

    for {
        timestamp, value, done, err := cursor.Prev()
        if err != nil {
            return err
        }
        if done {
            return nil
        }

        //fmt.Printf("atree range scan: %s, %v\n", time.Unix(int64(timestamp), 0).Format("2006-01-02 15:04:05"), value)

        if timestamp <= req.Until {
            err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
                Timestamp: endTimestamp,
                Value:     endValue,
                Total:     endValue - value,
            })
            if err != nil {
                return err
            }
            req.ResponseWriter.Feed(timestamp, value)

            if timestamp < req.Since {
                return nil
            }
        } else {
            // bug panic
            panic("continue cumulative but timestamp > req.Until")
        }
    }
}

type FindAndIterateCumulativeByTreeCursorReq struct {
    FracDigits     byte
    Since          uint32
    Until          uint32
    RootPageNo     uint32
    ResponseWriter *CumulativeMeasureWriter
}

func (s *Atree) FindAndIterateCumulativeByTreeCursor(req FindAndIterateCumulativeByTreeCursorReq) error {
    pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
    if err != nil {
        return err
    }

    treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
        PageNo:     pageNo,
        PageData:   buf,
        Atree:      s,
        FracDigits: req.FracDigits,
        MetricType: octopus.Cumulative,
    })
    if err != nil {
        return err
    }
    defer treeCursor.Close()

    var (
        endTimestamp uint32
        endValue     float64
    )

    for {
        timestamp, value, done, err := treeCursor.Prev()
        if err != nil {
            return err
        }

        if done {
            if endTimestamp > 0 {
                err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
                    Timestamp: endTimestamp,
                    Value:     endValue,
                    Total:     endValue,
                })
                if err != nil {
                    return err
                }
            }
            return nil
        }

        if timestamp > req.Until {
            continue
        }

        if endTimestamp > 0 {
            err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
                Timestamp: endTimestamp,
                Value:     endValue,
                Total:     endValue - value,
            })
            if err != nil {
                return err
            }
        }
        endTimestamp = timestamp
        endValue = value

        if timestamp < req.Since {
            return nil
        }
    }
}

type IterateAllInstantByTreeCursorReq struct {
    FracDigits     byte
    PageNo         uint32
    ResponseWriter *InstantMeasureWriter
}

func (s *Atree) IterateAllInstantByTreeCursor(req IterateAllInstantByTreeCursorReq) error {
    buf, err := s.fetchDataPage(req.PageNo)
    if err != nil {
        return err
    }

    treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
        PageNo:     req.PageNo,
        PageData:   buf,
        Atree:      s,
        FracDigits: req.FracDigits,
        MetricType: octopus.Instant,
    })
    if err != nil {
        return err
    }
    defer treeCursor.Close()

    for {
        timestamp, value, done, err := treeCursor.Prev()
        if err != nil {
            return err
        }

        if done {
            return nil
        }

        err = req.ResponseWriter.WriteMeasure(InstantMeasure{
            Timestamp: timestamp,
            Value:     value,
        })
        if err != nil {
            return err
        }
    }
}

type ContinueIterateInstantByTreeCursorReq struct {
    FracDigits     byte
    Since          uint32
    Until          uint32
    LastPageNo     uint32
    ResponseWriter *InstantMeasureWriter
}

func (s *Atree) ContinueIterateInstantByTreeCursor(req ContinueIterateInstantByTreeCursorReq) error {
    buf, err := s.fetchDataPage(req.LastPageNo)
    if err != nil {
        return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
    }

    treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
        PageNo:     req.LastPageNo,
        PageData:   buf,
        Atree:      s,
        FracDigits: req.FracDigits,
        MetricType: octopus.Instant,
    })
    if err != nil {
        return err
    }
    defer treeCursor.Close()

    for {
        timestamp, value, done, err := treeCursor.Prev()
        if err != nil {
            return err
        }

        if done {
            // no more records
            return nil
        }

        if timestamp > req.Until {
            panic("continue instant timestamp > req.Until")
        }

        if timestamp < req.Since {
            return nil
        }

        err = req.ResponseWriter.WriteMeasure(InstantMeasure{
            Timestamp: timestamp,
            Value:     value,
        })
        if err != nil {
            return err
        }
    }
}

type FindAndIterateInstantByTreeCursorReq struct {
    FracDigits     byte
    Since          uint32
    Until          uint32
    RootPageNo     uint32
    ResponseWriter *InstantMeasureWriter
}

func (s *Atree) FindAndIterateInstantByTreeCursor(req FindAndIterateInstantByTreeCursorReq) error {
    pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
    if err != nil {
        return err
    }

    treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
        PageNo:     pageNo,
        PageData:   buf,
        Atree:      s,
        FracDigits: req.FracDigits,
        MetricType: octopus.Instant,
    })
    if err != nil {
        return err
    }
    defer treeCursor.Close()

    for {
        timestamp, value, done, err := treeCursor.Prev()
        if err != nil {
            return err
        }

        if done {
            return nil
        }

        if timestamp > req.Until {
            continue
        }

        if timestamp < req.Since {
            return nil
        }

        err = req.ResponseWriter.WriteMeasure(InstantMeasure{
            Timestamp: timestamp,
            Value:     value,
        })
        if err != nil {
            return err
        }
    }
}

type ContinueCollectInstantPeriodsReq struct {
    FracDigits     byte
    Aggregator     *InstantAggregator
    ResponseWriter *InstantPeriodsWriter
    LastPageNo     uint32
    Since          uint32
    Until          uint32
}

func (s *Atree) ContinueCollectInstantPeriods(req ContinueCollectInstantPeriodsReq) error {
    buf, err := s.fetchDataPage(req.LastPageNo)
    if err != nil {
        return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
    }

    treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
        PageNo:     req.LastPageNo,
        PageData:   buf,
        Atree:      s,
        FracDigits: req.FracDigits,
        MetricType: octopus.Instant,
    })
    if err != nil {
        return err
    }
    defer treeCursor.Close()

    var period InstantPeriod

    for {
        timestamp, value, done, err := treeCursor.Prev()
        if err != nil {
            return err
        }

        if done || timestamp < req.Since {
            isCompleted := req.Aggregator.FillPeriod(timestamp, &period)
            if isCompleted {
                err := req.ResponseWriter.WritePeriod(period)
                if err != nil {
                    return err
                }
            }
            return nil
        }

        if timestamp <= req.Until {
            isCompleted := req.Aggregator.Feed(timestamp, value, &period)
            if isCompleted {
                err := req.ResponseWriter.WritePeriod(period)
                if err != nil {
                    return err
                }
            }
        }
    }
}

type FindInstantPeriodsReq struct {
    FracDigits     byte
    ResponseWriter *InstantPeriodsWriter
    RootPageNo     uint32
    Since          uint32
    Until          uint32
    GroupBy        octopus.GroupBy
    FirstHourOfDay int
}

func (s *Atree) FindInstantPeriods(req FindInstantPeriodsReq) error {
    pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
    if err != nil {
        return err
    }

    aggregator, err := NewInstantAggregator(InstantAggregatorOptions{
        GroupBy:        req.GroupBy,
        FirstHourOfDay: req.FirstHourOfDay,
    })
    if err != nil {
        return err
    }

    cursor, err := NewBackwardCursor(BackwardCursorOptions{
        PageNo:     pageNo,
        PageData:   buf,
        Atree:      s,
        FracDigits: req.FracDigits,
        MetricType: octopus.Instant,
    })
    if err != nil {
        return err
    }
    defer cursor.Close()

    var period InstantPeriod

    for {
        timestamp, value, done, err := cursor.Prev()
        if err != nil {
            return err
        }

        if done || timestamp < req.Since {
            isCompleted := aggregator.FillPeriod(timestamp, &period)
            if isCompleted {
                err := req.ResponseWriter.WritePeriod(period)
                if err != nil {
                    return err
                }
            }
            return nil
        }

        if timestamp <= req.Until {
            isCompleted := aggregator.Feed(timestamp, value, &period)
            if isCompleted {
                err := req.ResponseWriter.WritePeriod(period)
                if err != nil {
                    return err
                }
            }
        }
    }
}

type FindCumulativePeriodsReq struct {
    FracDigits     byte
    ResponseWriter *CumulativePeriodsWriter
    RootPageNo     uint32
    Since          uint32
    Until          uint32
    GroupBy        octopus.GroupBy
    FirstHourOfDay int
}

func (s *Atree) FindCumulativePeriods(req FindCumulativePeriodsReq) error {
    pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
    if err != nil {
        return err
    }

    aggregator, err := NewCumulativeAggregator(CumulativeAggregatorOptions{
        GroupBy:        req.GroupBy,
        FirstHourOfDay: req.FirstHourOfDay,
    })
    if err != nil {
        return err
    }

    cursor, err := NewBackwardCursor(BackwardCursorOptions{
        PageNo:     pageNo,
        PageData:   buf,
        Atree:      s,
        FracDigits: req.FracDigits,
        MetricType: octopus.Cumulative,
    })
    if err != nil {
        return err
    }
    defer cursor.Close()

    var period CumulativePeriod

    for {
        timestamp, value, done, err := cursor.Prev()
        if err != nil {
            return err
        }

        if done || timestamp < req.Since {
            isCompleted := aggregator.FillPeriod(timestamp, value, &period)
            if isCompleted {
                err := req.ResponseWriter.WritePeriod(period)
                if err != nil {
                    return err
                }
            }
            return nil
        }

        if timestamp <= req.Until {
            isCompleted := aggregator.Feed(timestamp, value, &period)
            if isCompleted {
                err := req.ResponseWriter.WritePeriod(period)
                if err != nil {
                    return err
                }
            }
        }
    }
}

type ContinueCollectCumulativePeriodsReq struct {
    FracDigits     byte
    Aggregator     *CumulativeAggregator
    ResponseWriter *CumulativePeriodsWriter
    LastPageNo     uint32
    Since          uint32
    Until          uint32
}

func (s *Atree) ContinueCollectCumulativePeriods(req ContinueCollectCumulativePeriodsReq) error {
    buf, err := s.fetchDataPage(req.LastPageNo)
    if err != nil {
        return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
    }

    treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
        PageNo:     req.LastPageNo,
        PageData:   buf,
        Atree:      s,
        FracDigits: req.FracDigits,
        MetricType: octopus.Cumulative,
    })
    if err != nil {
        return err
    }
    defer treeCursor.Close()

    var period CumulativePeriod

    for {
        timestamp, value, done, err := treeCursor.Prev()
        if err != nil {
            return err
        }

        if done || timestamp < req.Since {
            isCompleted := req.Aggregator.FillPeriod(timestamp, value, &period)
            if isCompleted {
                err := req.ResponseWriter.WritePeriod(period)
                if err != nil {
                    return err
                }
            }
            return nil
        }

        if timestamp <= req.Until {
            isCompleted := req.Aggregator.Feed(timestamp, value, &period)
            if isCompleted {
                err := req.ResponseWriter.WritePeriod(period)
                if err != nil {
                    return err
                }
            }
        }
    }
}
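The new scan requests above hand results to a diploma.AtreeMeasureConsumer instead of a concrete writer. A minimal consumer sketch, illustrative only: the interface is assumed to require at least Feed(timestamp uint32, value float64), which is the only method this file uses, and countingConsumer is not a real type in the repository:

    // countingConsumer counts measures delivered by a scan.
    type countingConsumer struct {
        n int
    }

    func (c *countingConsumer) Feed(timestamp uint32, value float64) {
        c.n++
    }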
atree/writers.go
@@ -1,306 +0,0 @@
package atree

import (
    "bytes"
    "fmt"
    "io"

    octopus "gordenko.dev/dima/diploma"
    "gordenko.dev/dima/diploma/bin"
    "gordenko.dev/dima/diploma/proto"
)

// CURRENT VALUE WRITER

type CurrentValue struct {
    MetricID  uint32
    Timestamp uint32
    Value     float64
}

type CurrentValueWriter struct {
    arr       []byte
    responder *ChunkedResponder
}

func NewCurrentValueWriter(dst io.Writer) *CurrentValueWriter {
    return &CurrentValueWriter{
        arr:       make([]byte, 16),
        responder: NewChunkedResponder(dst),
    }
}

func (s *CurrentValueWriter) BufferValue(m CurrentValue) {
    bin.PutUint32(s.arr[0:], m.MetricID)
    bin.PutUint32(s.arr[4:], m.Timestamp)
    bin.PutFloat64(s.arr[8:], m.Value)
    s.responder.BufferRecord(s.arr)
}

func (s *CurrentValueWriter) Close() error {
    return s.responder.Flush()
}

// INSTANT MEASURE WRITER

type InstantMeasure struct {
    Timestamp uint32
    Value     float64
}

type InstantMeasureWriter struct {
    arr       []byte
    responder *ChunkedResponder
}

func NewInstantMeasureWriter(dst io.Writer) *InstantMeasureWriter {
    return &InstantMeasureWriter{
        arr:       make([]byte, 12),
        responder: NewChunkedResponder(dst),
    }
}

func (s *InstantMeasureWriter) BufferMeasure(m InstantMeasure) {
    bin.PutUint32(s.arr[0:], m.Timestamp)
    bin.PutFloat64(s.arr[4:], m.Value)
    s.responder.BufferRecord(s.arr)
}

func (s *InstantMeasureWriter) WriteMeasure(m InstantMeasure) error {
    bin.PutUint32(s.arr[0:], m.Timestamp)
    bin.PutFloat64(s.arr[4:], m.Value)
    return s.responder.AppendRecord(s.arr)
}

func (s *InstantMeasureWriter) Close() error {
    return s.responder.Flush()
}

// CUMULATIVE MEASURE WRITER

type CumulativeMeasure struct {
    Timestamp uint32
    Value     float64
    Total     float64
}

type CumulativeMeasureWriter struct {
    arr       []byte
    responder *ChunkedResponder
}

func NewCumulativeMeasureWriter(dst io.Writer) *CumulativeMeasureWriter {
    return &CumulativeMeasureWriter{
        arr:       make([]byte, 20),
        responder: NewChunkedResponder(dst),
    }
}

func (s *CumulativeMeasureWriter) BufferMeasure(m CumulativeMeasure) {
    bin.PutUint32(s.arr[0:], m.Timestamp)
    bin.PutFloat64(s.arr[4:], m.Value)
    bin.PutFloat64(s.arr[12:], m.Total)
    s.responder.BufferRecord(s.arr)
}

func (s *CumulativeMeasureWriter) WriteMeasure(m CumulativeMeasure) error {
    bin.PutUint32(s.arr[0:], m.Timestamp)
    bin.PutFloat64(s.arr[4:], m.Value)
    bin.PutFloat64(s.arr[12:], m.Total)
    return s.responder.AppendRecord(s.arr)
}

func (s *CumulativeMeasureWriter) Close() error {
    return s.responder.Flush()
}

// INSTANT AGGREGATE WRITER

type InstantPeriodsWriter struct {
    aggregateFuncs byte
    arr            []byte
    responder      *ChunkedResponder
}

func NewInstantPeriodsWriter(dst io.Writer, aggregateFuncs byte) *InstantPeriodsWriter {
    var q int
    if (aggregateFuncs & octopus.AggregateMin) == octopus.AggregateMin {
        q++
    }
    if (aggregateFuncs & octopus.AggregateMax) == octopus.AggregateMax {
        q++
    }
    if (aggregateFuncs & octopus.AggregateAvg) == octopus.AggregateAvg {
        q++
    }
    return &InstantPeriodsWriter{
        aggregateFuncs: aggregateFuncs,
        arr:            make([]byte, 12+q*8),
        responder:      NewChunkedResponder(dst),
    }
}

type InstantPeriod struct {
    Period uint32
    Since  uint32
    Until  uint32
    Min    float64
    Max    float64
    Avg    float64
}

func (s *InstantPeriodsWriter) BufferMeasure(p InstantPeriod) {
    s.pack(p)
    s.responder.BufferRecord(s.arr)
}

func (s *InstantPeriodsWriter) WritePeriod(p InstantPeriod) error {
    s.pack(p)
    return s.responder.AppendRecord(s.arr)
}

func (s *InstantPeriodsWriter) Close() error {
    return s.responder.Flush()
}

func (s *InstantPeriodsWriter) pack(p InstantPeriod) {
    bin.PutUint32(s.arr[0:], p.Period)
    bin.PutUint32(s.arr[4:], p.Since)
    bin.PutUint32(s.arr[8:], p.Until)

    pos := 12
    if (s.aggregateFuncs & octopus.AggregateMin) == octopus.AggregateMin {
        bin.PutFloat64(s.arr[pos:], p.Min)
        pos += 8
    }
    if (s.aggregateFuncs & octopus.AggregateMax) == octopus.AggregateMax {
        bin.PutFloat64(s.arr[pos:], p.Max)
        pos += 8
    }
    if (s.aggregateFuncs & octopus.AggregateAvg) == octopus.AggregateAvg {
        bin.PutFloat64(s.arr[pos:], p.Avg)
    }
}

// CUMULATIVE AGGREGATE WRITER

type CumulativePeriodsWriter struct {
    arr       []byte
    responder *ChunkedResponder
}

func NewCumulativePeriodsWriter(dst io.Writer) *CumulativePeriodsWriter {
    return &CumulativePeriodsWriter{
        arr:       make([]byte, 28),
        responder: NewChunkedResponder(dst),
    }
}

type CumulativePeriod struct {
    Period   uint32
    Since    uint32
    Until    uint32
    EndValue float64
    Total    float64
}

func (s *CumulativePeriodsWriter) BufferMeasure(p CumulativePeriod) {
    s.pack(p)
    s.responder.BufferRecord(s.arr)
}

func (s *CumulativePeriodsWriter) WritePeriod(p CumulativePeriod) error {
    s.pack(p)
    return s.responder.AppendRecord(s.arr)
}

func (s *CumulativePeriodsWriter) Close() error {
    return s.responder.Flush()
}

func (s *CumulativePeriodsWriter) pack(p CumulativePeriod) {
    bin.PutUint32(s.arr[0:], p.Period)
    bin.PutUint32(s.arr[4:], p.Since)
    bin.PutUint32(s.arr[8:], p.Until)
    bin.PutFloat64(s.arr[12:], p.EndValue)
    bin.PutFloat64(s.arr[20:], p.Total)
}

// CHUNKED RESPONDER

//const headerSize = 3

var endMsg = []byte{
    proto.RespEndOfValue, // end of stream
}

type ChunkedResponder struct {
    recordsQty int
    buf        *bytes.Buffer
    dst        io.Writer
}

func NewChunkedResponder(dst io.Writer) *ChunkedResponder {
    s := &ChunkedResponder{
        recordsQty: 0,
        buf:        bytes.NewBuffer(nil),
        dst:        dst,
    }

    s.buf.Write([]byte{
        proto.RespPartOfValue, // message type
        0, 0, 0, 0, // records qty
    })
    return s
}

func (s *ChunkedResponder) BufferRecord(rec []byte) {
    s.buf.Write(rec)
    s.recordsQty++
}

func (s *ChunkedResponder) AppendRecord(rec []byte) error {
    s.buf.Write(rec)
    s.recordsQty++

    if s.buf.Len() < 1500 {
        return nil
    }

    if err := s.sendBuffered(); err != nil {
        return err
    }

    s.buf.Write([]byte{
        proto.RespPartOfValue, // message type
        0, 0, 0, 0, // records qty
    })
    s.recordsQty = 0
    return nil
}

func (s *ChunkedResponder) Flush() error {
    if s.recordsQty > 0 {
        if err := s.sendBuffered(); err != nil {
            return err
        }
    }
    if _, err := s.dst.Write(endMsg); err != nil {
        return err
    }
    return nil
}

func (s *ChunkedResponder) sendBuffered() (err error) {
    msg := s.buf.Bytes()
    bin.PutUint32(msg[1:], uint32(s.recordsQty))
    n, err := s.dst.Write(msg)
    if err != nil {
        return
    }
    if n != len(msg) {
        return fmt.Errorf("incomplete write %d bytes instead of %d", n, len(msg))
    }
    s.buf.Reset()
    return
}
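For reference, a sketch of reading the stream that ChunkedResponder produces: each chunk is a RespPartOfValue byte, a 4-byte record count, then that many fixed-size records, and a single RespEndOfValue byte ends the stream. This is illustrative only and not part of the commit: readChunks is not a real function in the repository, the record size depends on the writer in use, the byte order of the count is whatever the project's bin package uses (big-endian is assumed here), and the imports are bufio, encoding/binary, fmt, io plus the project's proto package.

    func readChunks(src *bufio.Reader, recordSize int, onRecord func([]byte)) error {
        for {
            code, err := src.ReadByte()
            if err != nil {
                return err
            }
            if code == proto.RespEndOfValue {
                return nil // end of stream
            }
            if code != proto.RespPartOfValue {
                return fmt.Errorf("unexpected response code %d", code)
            }
            var header [4]byte
            if _, err := io.ReadFull(src, header[:]); err != nil {
                return err
            }
            qty := binary.BigEndian.Uint32(header[:]) // byte order is an assumption
            rec := make([]byte, recordSize)
            for i := uint32(0); i < qty; i++ {
                if _, err := io.ReadFull(src, rec); err != nil {
                    return err
                }
                onRecord(rec)
            }
        }
    }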
client/client.go
@@ -66,13 +66,7 @@ func (s *Connection) mustSuccess(reader *bufreader.BufferedReader) (err error) {
    }
}

type Metric struct {
    MetricID   uint32
    MetricType diploma.MetricType
    FracDigits byte
}

func (s *Connection) AddMetric(req Metric) error {
func (s *Connection) AddMetric(req proto.AddMetricReq) error {
    arr := []byte{
        proto.TypeAddMetric,
        0, 0, 0, 0, //
@@ -87,7 +81,7 @@ func (s *Connection) AddMetric(req Metric) error {
    return s.mustSuccess(s.src)
}

func (s *Connection) GetMetric(metricID uint32) (*Metric, error) {
func (s *Connection) GetMetric(metricID uint32) (*proto.Metric, error) {
    arr := []byte{
        proto.TypeGetMetric,
        0, 0, 0, 0,
@@ -110,10 +104,10 @@ func (s *Connection) GetMetric(metricID uint32) (*Metric, error) {
            return nil, fmt.Errorf("read body: %s", err)
        }

        return &Metric{
        return &proto.Metric{
            MetricID:   bin.GetUint32(arr),
            MetricType: diploma.MetricType(arr[4]),
            FracDigits: arr[5],
            FracDigits: int(arr[5]),
        }, nil

    case proto.RespError:
@@ -137,13 +131,7 @@ func (s *Connection) DeleteMetric(metricID uint32) error {
    return s.mustSuccess(s.src)
}

type AppendMeasureReq struct {
    MetricID  uint32
    Timestamp uint32
    Value     float64
}

func (s *Connection) AppendMeasure(req AppendMeasureReq) (err error) {
func (s *Connection) AppendMeasure(req proto.AppendMeasureReq) (err error) {
    arr := []byte{
        proto.TypeAppendMeasure,
        0, 0, 0, 0, // metricID
@@ -160,17 +148,7 @@ func (s *Connection) AppendMeasure(req AppendMeasureReq) (err error) {
    return s.mustSuccess(s.src)
}

type AppendMeasuresReq struct {
    MetricID uint32
    Measures []Measure
}

type Measure struct {
    Timestamp uint32
    Value     float64
}

func (s *Connection) AppendMeasures(req AppendMeasuresReq) (err error) {
func (s *Connection) AppendMeasures(req proto.AppendMeasuresReq) (err error) {
    if len(req.Measures) > 65535 {
        return fmt.Errorf("wrong measures qty: %d", len(req.Measures))
    }
@@ -194,12 +172,7 @@ func (s *Connection) AppendMeasures(req AppendMeasuresReq) (err error) {
    return s.mustSuccess(s.src)
}

type InstantMeasure struct {
    Timestamp uint32
    Value     float64
}

func (s *Connection) ListAllInstantMeasures(metricID uint32) ([]InstantMeasure, error) {
func (s *Connection) ListAllInstantMeasures(metricID uint32) ([]proto.InstantMeasure, error) {
    arr := []byte{
        proto.TypeListAllInstantMeasures,
        0, 0, 0, 0, // metricID
@@ -211,7 +184,7 @@ func (s *Connection) ListAllInstantMeasures(metricID uint32) ([]InstantMeasure,
    }

    var (
        result []InstantMeasure
        result []proto.InstantMeasure
        tmp    = make([]byte, 12)
    )

@@ -234,7 +207,7 @@ func (s *Connection) ListAllInstantMeasures(metricID uint32) ([]InstantMeasure,
                return nil, fmt.Errorf("read record #%d: %s", i, err)
            }

            result = append(result, InstantMeasure{
            result = append(result, proto.InstantMeasure{
                Timestamp: bin.GetUint32(tmp),
                Value:     bin.GetFloat64(tmp[4:]),
            })
@@ -252,13 +225,12 @@ func (s *Connection) ListAllInstantMeasures(metricID uint32) ([]InstantMeasure,
    }
}

func (s *Connection) ListInstantMeasures(req proto.ListInstantMeasuresReq) ([]InstantMeasure, error) {
func (s *Connection) ListInstantMeasures(req proto.ListInstantMeasuresReq) ([]proto.InstantMeasure, error) {
    arr := []byte{
        proto.TypeListInstantMeasures,
        0, 0, 0, 0, // metricID
        0, 0, 0, 0, // since
        0, 0, 0, 0, // until
        byte(req.FirstHourOfDay),
    }
    bin.PutUint32(arr[1:], req.MetricID)
    bin.PutUint32(arr[5:], req.Since)
@@ -269,7 +241,7 @@ func (s *Connection) ListInstantMeasures(req proto.ListInstantMeasuresReq) ([]In
    }

    var (
        result []InstantMeasure
        result []proto.InstantMeasure
        tmp    = make([]byte, 12)
    )

@@ -292,7 +264,7 @@ func (s *Connection) ListInstantMeasures(req proto.ListInstantMeasuresReq) ([]In
                return nil, fmt.Errorf("read record #%d: %s", i, err)
            }

            result = append(result, InstantMeasure{
            result = append(result, proto.InstantMeasure{
                Timestamp: bin.GetUint32(tmp),
                Value:     bin.GetFloat64(tmp[4:]),
            })
@@ -310,13 +282,7 @@ func (s *Connection) ListInstantMeasures(req proto.ListInstantMeasuresReq) ([]In
    }
}

type CumulativeMeasure struct {
    Timestamp uint32
    Value     float64
    Total     float64
}

func (s *Connection) ListAllCumulativeMeasures(metricID uint32) ([]CumulativeMeasure, error) {
func (s *Connection) ListAllCumulativeMeasures(metricID uint32) ([]proto.CumulativeMeasure, error) {
    arr := []byte{
        proto.TypeListAllCumulativeMeasures,
        0, 0, 0, 0, // metricID
@@ -328,7 +294,7 @@ func (s *Connection) ListAllCumulativeMeasures(metricID uint32) ([]CumulativeMea
    }

    var (
        result []CumulativeMeasure
        result []proto.CumulativeMeasure
        tmp    = make([]byte, 20)
    )

@@ -351,7 +317,7 @@ func (s *Connection) ListAllCumulativeMeasures(metricID uint32) ([]CumulativeMea
                return nil, fmt.Errorf("read record #%d: %s", i, err)
            }

            result = append(result, CumulativeMeasure{
            result = append(result, proto.CumulativeMeasure{
                Timestamp: bin.GetUint32(tmp),
                Value:     bin.GetFloat64(tmp[4:]),
                Total:     bin.GetFloat64(tmp[12:]),
@@ -370,13 +336,12 @@ func (s *Connection) ListAllCumulativeMeasures(metricID uint32) ([]CumulativeMea
    }
}

func (s *Connection) ListCumulativeMeasures(req proto.ListCumulativeMeasuresReq) ([]CumulativeMeasure, error) {
func (s *Connection) ListCumulativeMeasures(req proto.ListCumulativeMeasuresReq) ([]proto.CumulativeMeasure, error) {
    arr := []byte{
        proto.TypeListCumulativeMeasures,
        0, 0, 0, 0, // metricID
        0, 0, 0, 0, // since
        0, 0, 0, 0, // until
        byte(req.FirstHourOfDay),
    }
    bin.PutUint32(arr[1:], req.MetricID)
    bin.PutUint32(arr[5:], req.Since)
@@ -387,7 +352,7 @@ func (s *Connection) ListCumulativeMeasures(req proto.ListCumulativeMeasuresReq)
    }

    var (
        result []CumulativeMeasure
        result []proto.CumulativeMeasure
        tmp    = make([]byte, 20)
    )

@@ -410,7 +375,7 @@ func (s *Connection) ListCumulativeMeasures(req proto.ListCumulativeMeasuresReq)
                return nil, fmt.Errorf("read record #%d: %s", i, err)
            }

            result = append(result, CumulativeMeasure{
            result = append(result, proto.CumulativeMeasure{
                Timestamp: bin.GetUint32(tmp),
                Value:     bin.GetFloat64(tmp[4:]),
                Total:     bin.GetFloat64(tmp[12:]),
@@ -429,16 +394,7 @@ func (s *Connection) ListCumulativeMeasures(req proto.ListCumulativeMeasuresReq)
    }
}

type InstantPeriod struct {
    Period uint32
    Since  uint32
    Until  uint32
    Min    float64
    Max    float64
    Avg    float64
}

func (s *Connection) ListInstantPeriods(req proto.ListInstantPeriodsReq) ([]InstantPeriod, error) {
func (s *Connection) ListInstantPeriods(req proto.ListInstantPeriodsReq) ([]proto.InstantPeriod, error) {
    arr := []byte{
        proto.TypeListInstantPeriods,
        0, 0, 0, 0, // metricID
@@ -449,8 +405,12 @@ func (s *Connection) ListInstantPeriods(req proto.ListInstantPeriodsReq) ([]Inst
        byte(req.FirstHourOfDay),
    }
    bin.PutUint32(arr[1:], req.MetricID)
    bin.PutUint32(arr[5:], req.Since)
    bin.PutUint32(arr[9:], req.Until)
    bin.PutUint16(arr[5:], uint16(req.Since.Year))
    arr[7] = byte(req.Since.Month)
    arr[8] = byte(req.Since.Day)
    bin.PutUint16(arr[9:], uint16(req.Until.Year))
    arr[11] = byte(req.Until.Month)
    arr[12] = byte(req.Until.Day)

    if _, err := s.conn.Write(arr); err != nil {
        return nil, err
@@ -468,7 +428,7 @@ func (s *Connection) ListInstantPeriods(req proto.ListInstantPeriodsReq) ([]Inst
    }

    var (
        result []InstantPeriod
        result []proto.InstantPeriod
        // 12 bytes - period, since, until
        // q * 8 bytes - min, max, avg
        tmp = make([]byte, 12+q*8)
@@ -494,10 +454,10 @@ func (s *Connection) ListInstantPeriods(req proto.ListInstantPeriodsReq) ([]Inst
            }

            var (
                p = InstantPeriod{
                p = proto.InstantPeriod{
                    Period: bin.GetUint32(tmp[0:]),
                    Since:  bin.GetUint32(tmp[4:]),
                    Until:  bin.GetUint32(tmp[8:]),
                    Start:  bin.GetUint32(tmp[4:]),
                    End:    bin.GetUint32(tmp[8:]),
                }
                // 12 bytes - period, since, until
                pos = 12
@@ -529,15 +489,7 @@ func (s *Connection) ListInstantPeriods(req proto.ListInstantPeriodsReq) ([]Inst
    }
}

type CumulativePeriod struct {
    Period   uint32
    Since    uint32
    Until    uint32
    EndValue float64
    Total    float64
}

func (s *Connection) ListCumulativePeriods(req proto.ListCumulativePeriodsReq) ([]CumulativePeriod, error) {
func (s *Connection) ListCumulativePeriods(req proto.ListCumulativePeriodsReq) ([]proto.CumulativePeriod, error) {
    arr := []byte{
        proto.TypeListCumulativePeriods,
        0, 0, 0, 0, // metricID
@@ -547,15 +499,19 @@ func (s *Connection) ListCumulativePeriods(req proto.ListCumulativePeriodsReq) (
        byte(req.FirstHourOfDay),
    }
    bin.PutUint32(arr[1:], req.MetricID)
    bin.PutUint32(arr[5:], req.Since)
    bin.PutUint32(arr[9:], req.Until)
    bin.PutUint16(arr[5:], uint16(req.Since.Year))
    arr[7] = byte(req.Since.Month)
    arr[8] = byte(req.Since.Day)
    bin.PutUint16(arr[9:], uint16(req.Until.Year))
    arr[11] = byte(req.Until.Month)
    arr[12] = byte(req.Until.Day)

    if _, err := s.conn.Write(arr); err != nil {
        return nil, err
    }

    var (
        result []CumulativePeriod
        result []proto.CumulativePeriod
        tmp    = make([]byte, 28)
    )

@@ -577,12 +533,12 @@ func (s *Connection) ListCumulativePeriods(req proto.ListCumulativePeriodsReq) (
            if err != nil {
                return nil, fmt.Errorf("read record #%d: %s", i, err)
            }
            result = append(result, CumulativePeriod{
                Period:   bin.GetUint32(tmp[0:]),
                Since:    bin.GetUint32(tmp[4:]),
                Until:    bin.GetUint32(tmp[8:]),
                EndValue: bin.GetFloat64(tmp[12:]),
                Total:    bin.GetFloat64(tmp[20:]),
            result = append(result, proto.CumulativePeriod{
                Period:     bin.GetUint32(tmp[0:]),
                Start:      bin.GetUint32(tmp[4:]),
                End:        bin.GetUint32(tmp[8:]),
                StartValue: bin.GetFloat64(tmp[12:]),
                EndValue:   bin.GetFloat64(tmp[20:]),
            })
        }

@@ -598,13 +554,7 @@ func (s *Connection) ListCumulativePeriods(req proto.ListCumulativePeriodsReq) (
    }
}

type CurrentValue struct {
    MetricID  uint32
    Timestamp uint32
    Value     float64
}

func (s *Connection) ListCurrentValues(metricIDs []uint32) ([]CurrentValue, error) {
func (s *Connection) ListCurrentValues(metricIDs []uint32) ([]proto.CurrentValue, error) {
    arr := make([]byte, 3+metricKeySize*len(metricIDs))
    arr[0] = proto.TypeListCurrentValues

@@ -621,7 +571,7 @@ func (s *Connection) ListCurrentValues(metricIDs []uint32) ([]CurrentValue, erro
    }

    var (
        result []CurrentValue
        result []proto.CurrentValue
        tmp    = make([]byte, 16)
    )

@@ -644,7 +594,7 @@ func (s *Connection) ListCurrentValues(metricIDs []uint32) ([]CurrentValue, erro
                return nil, fmt.Errorf("read record #%d: %s", i, err)
            }

            result = append(result, CurrentValue{
            result = append(result, proto.CurrentValue{
                MetricID:  bin.GetUint32(tmp),
                Timestamp: bin.GetUint32(tmp[4:]),
                Value:     bin.GetFloat64(tmp[8:]),
@@ -678,55 +628,6 @@ func (s *Connection) DeleteMeasures(req proto.DeleteMeasuresReq) (err error) {
    return s.mustSuccess(s.src)
}

type RangeTotalResp struct {
    Since      uint32
    SinceValue float64
    Until      uint32
    UntilValue float64
}

func (s *Connection) RangeTotal(req proto.RangeTotalReq) (*RangeTotalResp, error) {
    arr := []byte{
        proto.TypeGetMetric,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
    }
    bin.PutUint32(arr[1:], req.MetricID)
    bin.PutUint32(arr[5:], req.Since)
    bin.PutUint32(arr[9:], req.MetricID)

    if _, err := s.conn.Write(arr); err != nil {
        return nil, err
    }

    code, err := s.src.ReadByte()
    if err != nil {
        return nil, fmt.Errorf("read response code: %s", err)
    }

    switch code {
    case proto.RespValue:
        arr, err := s.src.ReadN(24)
        if err != nil {
            return nil, fmt.Errorf("read body: %s", err)
        }

        return &RangeTotalResp{
            Since:      bin.GetUint32(arr),
            SinceValue: bin.GetFloat64(arr[4:]),
            Until:      bin.GetUint32(arr[12:]),
            UntilValue: bin.GetFloat64(arr[16:]),
        }, nil

    case proto.RespError:
        return nil, s.onError()

    default:
        return nil, fmt.Errorf("unknown reponse code %d", code)
    }
}

func (s *Connection) onError() error {
    errorCode, err := bin.ReadUint16(s.src)
    if err != nil {
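ListInstantPeriods and ListCumulativePeriods now encode Since and Until as calendar dates (Year, Month, Day) instead of uint32 timestamps. A worked example of the new layout, illustrative only; offsets come from the code above, and the byte order of PutUint16 is whatever the project's bin package defines:

    // Packing Since = 2024-03-01 into offsets 5..8 of the request buffer.
    bin.PutUint16(arr[5:], uint16(2024)) // Since.Year
    arr[7] = byte(3)                     // Since.Month (March)
    arr[8] = byte(1)                     // Since.Day
    // Until occupies offsets 9..12 in the same layout.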
database/api.go
@@ -13,6 +13,7 @@ import (
    "gordenko.dev/dima/diploma/chunkenc"
    "gordenko.dev/dima/diploma/conbuf"
    "gordenko.dev/dima/diploma/proto"
    "gordenko.dev/dima/diploma/transform"
    "gordenko.dev/dima/diploma/txlog"
)

@@ -326,6 +327,9 @@ type FilledPage struct {
}

type tryAppendMeasureResult struct {
    MetricID   uint32
    Timestamp  uint32
    Value      float64
    FilledPage *FilledPage
    ResultCode byte
}
@@ -441,7 +445,6 @@ func (s *Database) AppendMeasures(req proto.AppendMeasuresReq) uint16 {
    )

    for idx, measure := range req.Measures {
        //fmt.Printf("%d %v\n", measure.Timestamp, measure.Value)
        if since == 0 {
            since = measure.Timestamp
        } else {
@@ -470,7 +473,6 @@ func (s *Database) AppendMeasures(req proto.AppendMeasuresReq) uint16 {
                )
                <-waitCh
            }
            //fmt.Printf("m.Value: %v < untilValue: %v\n", measure.Value, untilValue)
            return proto.ErrNonMonotonicValue
        }
    }
@@ -497,10 +499,11 @@ func (s *Database) AppendMeasures(req proto.AppendMeasuresReq) uint16 {

                toAppendMeasures = nil
            }
            //fmt.Printf("APPEND DATA PAGE %d, %v\n", measure.Timestamp, measure.Value)
            report, err := s.atree.AppendDataPage(atree.AppendDataPageReq{
                MetricID:   req.MetricID,
                Timestamp:  until,
                Value:      untilValue,
                Timestamp:  measure.Timestamp,
                Value:      measure.Value,
                Since:      since,
                RootPageNo: rootPageNo,
                PrevPageNo: prevPageNo,
@@ -593,23 +596,27 @@ func (s *Database) DeleteMeasures(req proto.DeleteMeasuresReq) uint16 {
    result := <-resultCh

    switch result.ResultCode {
    case Succeed:
        var (
            freeDataPages  []uint32
            freeIndexPages []uint32
        )
        if result.RootPageNo > 0 {
            pageLists, err := s.atree.GetAllPages(result.RootPageNo)
            if err != nil {
                diploma.Abort(diploma.FailedAtreeRequest, err)
            }
            freeDataPages = pageLists.DataPages
            freeIndexPages = pageLists.IndexPages
    case NoMeasuresToDelete:
        // ok

    case DeleteFromAtreeNotNeeded:
        // record the deletion in the TransactionLog
        waitCh := s.txlog.WriteDeletedMeasures(txlog.DeletedMeasures{
            MetricID: req.MetricID,
        })
        <-waitCh

    case DeleteFromAtreeRequired:
        // collect the numbers of all of the metric's data and index pages (essentially a REDO log record)
        pageLists, err := s.atree.GetAllPages(result.RootPageNo)
        if err != nil {
            diploma.Abort(diploma.FailedAtreeRequest, err)
        }
        // record the deletion in the TransactionLog
        waitCh := s.txlog.WriteDeletedMeasures(txlog.DeletedMeasures{
            MetricID:       req.MetricID,
            FreeDataPages:  freeDataPages,
            FreeIndexPages: freeIndexPages,
            FreeDataPages:  pageLists.DataPages,
            FreeIndexPages: pageLists.IndexPages,
        })
        <-waitCh

@@ -624,98 +631,32 @@ func (s *Database) DeleteMeasures(req proto.DeleteMeasuresReq) uint16 {

// SELECT

type instantMeasuresResult struct {
type fullScanResult struct {
    ResultCode byte
    FracDigits byte
    PageNo     uint32
    LastPageNo uint32
}

func (s *Database) ListAllInstantMeasures(conn net.Conn, req proto.ListAllInstantMetricMeasuresReq) error {
    resultCh := make(chan instantMeasuresResult, 1)
    responseWriter := transform.NewInstantMeasureWriter(conn, 0)

    responseWriter := atree.NewInstantMeasureWriter(conn)

    s.appendJobToWorkerQueue(tryListAllInstantMeasuresReq{
    return s.fullScan(fullScanReq{
        MetricID:       req.MetricID,
        MetricType:     diploma.Instant,
        Conn:           conn,
        ResponseWriter: responseWriter,
        ResultCh:       resultCh,
    })

    result := <-resultCh

    switch result.ResultCode {
    case QueryDone:
        responseWriter.Close()

    case UntilFound:
        err := s.atree.IterateAllInstantByTreeCursor(atree.IterateAllInstantByTreeCursorReq{
            FracDigits:     result.FracDigits,
            PageNo:         result.PageNo,
            ResponseWriter: responseWriter,
        })
        s.metricRUnlock(req.MetricID)

        if err != nil {
            reply(conn, proto.ErrUnexpected)
        } else {
            responseWriter.Close()
        }

    case NoMetric:
        reply(conn, proto.ErrNoMetric)

    case WrongMetricType:
        reply(conn, proto.ErrWrongMetricType)

    default:
        diploma.Abort(diploma.WrongResultCodeBug, ErrWrongResultCodeBug)
    }
    return nil
}

func (s *Database) ListAllCumulativeMeasures(conn io.Writer, req proto.ListAllCumulativeMeasuresReq) error {
    resultCh := make(chan cumulativeMeasuresResult, 1)
    responseWriter := transform.NewCumulativeMeasureWriter(conn, 0)

    responseWriter := atree.NewCumulativeMeasureWriter(conn)

    s.appendJobToWorkerQueue(tryListAllCumulativeMeasuresReq{
    return s.fullScan(fullScanReq{
        MetricID:       req.MetricID,
        MetricType:     diploma.Cumulative,
        Conn:           conn,
        ResponseWriter: responseWriter,
        ResultCh:       resultCh,
    })

    result := <-resultCh

    switch result.ResultCode {
    case QueryDone:
        responseWriter.Close()

    case UntilFound:
        err := s.atree.IterateAllCumulativeByTreeCursor(atree.IterateAllCumulativeByTreeCursorReq{
            FracDigits:     result.FracDigits,
            PageNo:         result.PageNo,
            EndTimestamp:   result.EndTimestamp,
            EndValue:       result.EndValue,
            ResponseWriter: responseWriter,
        })
        s.metricRUnlock(req.MetricID)

        if err != nil {
            reply(conn, proto.ErrUnexpected)
        } else {
            responseWriter.Close()
        }

    case NoMetric:
        reply(conn, proto.ErrNoMetric)

    case WrongMetricType:
        reply(conn, proto.ErrWrongMetricType)

    default:
        diploma.Abort(diploma.WrongResultCodeBug, ErrWrongResultCodeBug)
    }
    return nil
}

func (s *Database) ListInstantMeasures(conn net.Conn, req proto.ListInstantMeasuresReq) error {
@@ -724,79 +665,16 @@ func (s *Database) ListInstantMeasures(conn net.Conn, req proto.ListInstantMeasu
        return nil
    }

    var (
        since = req.Since
        until = req.Until
    )
    if req.FirstHourOfDay > 0 {
        since, until = correctToFHD(req.Since, req.Until, req.FirstHourOfDay)
    }
    responseWriter := transform.NewInstantMeasureWriter(conn, req.Since)

    resultCh := make(chan instantMeasuresResult, 1)
    responseWriter := atree.NewInstantMeasureWriter(conn)

    s.appendJobToWorkerQueue(tryListInstantMeasuresReq{
    return s.rangeScan(rangeScanReq{
        MetricID:       req.MetricID,
        Since:          since,
        Until:          until,
        MetricType:     diploma.Instant,
        Since:          req.Since,
        Until:          req.Until,
        Conn:           conn,
        ResponseWriter: responseWriter,
        ResultCh:       resultCh,
    })

    result := <-resultCh

    switch result.ResultCode {
    case QueryDone:
        responseWriter.Close()

    case UntilFound:
        err := s.atree.ContinueIterateInstantByTreeCursor(atree.ContinueIterateInstantByTreeCursorReq{
            FracDigits:     result.FracDigits,
            Since:          since,
            Until:          until,
            LastPageNo:     result.PageNo,
            ResponseWriter: responseWriter,
        })
        s.metricRUnlock(req.MetricID)
        if err != nil {
            reply(conn, proto.ErrUnexpected)
        } else {
            responseWriter.Close()
        }

    case UntilNotFound:
        err := s.atree.FindAndIterateInstantByTreeCursor(atree.FindAndIterateInstantByTreeCursorReq{
            FracDigits:     result.FracDigits,
            Since:          since,
            Until:          until,
            RootPageNo:     result.PageNo,
            ResponseWriter: responseWriter,
        })
        s.metricRUnlock(req.MetricID)
        if err != nil {
            reply(conn, proto.ErrUnexpected)
        } else {
            responseWriter.Close()
        }

    case NoMetric:
        reply(conn, proto.ErrNoMetric)

    case WrongMetricType:
        reply(conn, proto.ErrWrongMetricType)

    default:
        diploma.Abort(diploma.WrongResultCodeBug, ErrWrongResultCodeBug)
    }
    return nil
}

type cumulativeMeasuresResult struct {
    ResultCode   byte
    FracDigits   byte
    PageNo       uint32
    EndTimestamp uint32
    EndValue     float64
}

func (s *Database) ListCumulativeMeasures(conn net.Conn, req proto.ListCumulativeMeasuresReq) error {
@@ -805,99 +683,37 @@ func (s *Database) ListCumulativeMeasures(conn net.Conn, req proto.ListCumulativ
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
since = req.Since
|
||||
until = req.Until
|
||||
)
|
||||
if req.FirstHourOfDay > 0 {
|
||||
since, until = correctToFHD(since, until, req.FirstHourOfDay)
|
||||
}
|
||||
responseWriter := transform.NewCumulativeMeasureWriter(conn, req.Since)
|
||||
|
||||
resultCh := make(chan cumulativeMeasuresResult, 1)
|
||||
responseWriter := atree.NewCumulativeMeasureWriter(conn)
|
||||
|
||||
s.appendJobToWorkerQueue(tryListCumulativeMeasuresReq{
|
||||
return s.rangeScan(rangeScanReq{
|
||||
MetricID: req.MetricID,
|
||||
Since: since,
|
||||
Until: until,
|
||||
MetricType: diploma.Cumulative,
|
||||
Since: req.Since,
|
||||
Until: req.Until,
|
||||
Conn: conn,
|
||||
ResponseWriter: responseWriter,
|
||||
ResultCh: resultCh,
|
||||
})
|
||||
|
||||
result := <-resultCh
|
||||
|
||||
switch result.ResultCode {
|
||||
case QueryDone:
|
||||
responseWriter.Close()
|
||||
|
||||
case UntilFound:
|
||||
err := s.atree.ContinueIterateCumulativeByTreeCursor(atree.ContinueIterateCumulativeByTreeCursorReq{
|
||||
FracDigits: result.FracDigits,
|
||||
Since: since,
|
||||
Until: until,
|
||||
LastPageNo: result.PageNo,
|
||||
EndTimestamp: result.EndTimestamp,
|
||||
EndValue: result.EndValue,
|
||||
ResponseWriter: responseWriter,
|
||||
})
|
||||
s.metricRUnlock(req.MetricID)
|
||||
if err != nil {
|
||||
reply(conn, proto.ErrUnexpected)
|
||||
} else {
|
||||
responseWriter.Close()
|
||||
}
|
||||
|
||||
case UntilNotFound:
|
||||
err := s.atree.FindAndIterateCumulativeByTreeCursor(atree.FindAndIterateCumulativeByTreeCursorReq{
|
||||
FracDigits: result.FracDigits,
|
||||
Since: since,
|
||||
Until: until,
|
||||
RootPageNo: result.PageNo,
|
||||
ResponseWriter: responseWriter,
|
||||
})
|
||||
s.metricRUnlock(req.MetricID)
|
||||
if err != nil {
|
||||
reply(conn, proto.ErrUnexpected)
|
||||
} else {
|
||||
responseWriter.Close()
|
||||
}
|
||||
|
||||
case NoMetric:
|
||||
reply(conn, proto.ErrNoMetric)
|
||||
|
||||
case WrongMetricType:
|
||||
reply(conn, proto.ErrWrongMetricType)
|
||||
|
||||
default:
|
||||
diploma.Abort(diploma.WrongResultCodeBug, ErrWrongResultCodeBug)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type instantPeriodsResult struct {
|
||||
type rangeScanResult struct {
|
||||
ResultCode byte
|
||||
FracDigits byte
|
||||
PageNo uint32
|
||||
RootPageNo uint32
|
||||
LastPageNo uint32
|
||||
}
|
||||
|
||||
func (s *Database) ListInstantPeriods(conn net.Conn, req proto.ListInstantPeriodsReq) error {
|
||||
if req.Since > req.Until {
|
||||
since, until := timeBoundsOfAggregation(req.Since, req.Until, req.GroupBy, req.FirstHourOfDay)
|
||||
if since.After(until) {
|
||||
reply(conn, proto.ErrInvalidRange)
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
since = req.Since
|
||||
until = req.Until
|
||||
)
|
||||
if req.FirstHourOfDay > 0 {
|
||||
since, until = correctToFHD(since, until, req.FirstHourOfDay)
|
||||
}
|
||||
|
||||
resultCh := make(chan instantPeriodsResult, 1)
|
||||
|
||||
aggregator, err := atree.NewInstantAggregator(atree.InstantAggregatorOptions{
|
||||
responseWriter, err := transform.NewInstantPeriodsWriter(transform.InstantPeriodsWriterOptions{
|
||||
Dst: conn,
|
||||
GroupBy: req.GroupBy,
|
||||
Since: uint32(since.Unix()),
|
||||
AggregateFuncs: req.AggregateFuncs,
|
||||
FirstHourOfDay: req.FirstHourOfDay,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -905,14 +721,62 @@ func (s *Database) ListInstantPeriods(conn net.Conn, req proto.ListInstantPeriod
|
||||
return nil
|
||||
}
|
||||
|
||||
responseWriter := atree.NewInstantPeriodsWriter(conn, req.AggregateFuncs)
|
||||
|
||||
s.appendJobToWorkerQueue(tryListInstantPeriodsReq{
|
||||
return s.rangeScan(rangeScanReq{
|
||||
MetricID: req.MetricID,
|
||||
Since: since,
|
||||
Until: until,
|
||||
Aggregator: aggregator,
|
||||
MetricType: diploma.Instant,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
Conn: conn,
|
||||
ResponseWriter: responseWriter,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Database) ListCumulativePeriods(conn net.Conn, req proto.ListCumulativePeriodsReq) error {
|
||||
since, until := timeBoundsOfAggregation(req.Since, req.Until, req.GroupBy, req.FirstHourOfDay)
|
||||
if since.After(until) {
|
||||
reply(conn, proto.ErrInvalidRange)
|
||||
return nil
|
||||
}
|
||||
|
||||
responseWriter, err := transform.NewCumulativePeriodsWriter(transform.CumulativePeriodsWriterOptions{
|
||||
Dst: conn,
|
||||
GroupBy: req.GroupBy,
|
||||
Since: uint32(since.Unix()),
|
||||
FirstHourOfDay: req.FirstHourOfDay,
|
||||
})
|
||||
if err != nil {
|
||||
reply(conn, proto.ErrUnexpected)
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.rangeScan(rangeScanReq{
|
||||
MetricID: req.MetricID,
|
||||
MetricType: diploma.Cumulative,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
Conn: conn,
|
||||
ResponseWriter: responseWriter,
|
||||
})
|
||||
}
|
||||
|
||||
type rangeScanReq struct {
|
||||
MetricID uint32
|
||||
MetricType diploma.MetricType
|
||||
Since uint32
|
||||
Until uint32
|
||||
Conn io.Writer
|
||||
ResponseWriter diploma.MeasureConsumer
|
||||
}
|
||||
|
||||
func (s *Database) rangeScan(req rangeScanReq) error {
|
||||
resultCh := make(chan rangeScanResult, 1)
|
||||
|
||||
s.appendJobToWorkerQueue(tryRangeScanReq{
|
||||
MetricID: req.MetricID,
|
||||
Since: req.Since,
|
||||
Until: req.Until,
|
||||
MetricType: req.MetricType,
|
||||
ResponseWriter: req.ResponseWriter,
|
||||
ResultCh: resultCh,
|
||||
})
|
||||
|
||||
@@ -920,48 +784,46 @@ func (s *Database) ListInstantPeriods(conn net.Conn, req proto.ListInstantPeriod
|
||||
|
||||
switch result.ResultCode {
|
||||
case QueryDone:
|
||||
responseWriter.Close()
|
||||
req.ResponseWriter.Close()
|
||||
|
||||
case UntilFound:
|
||||
err := s.atree.ContinueCollectInstantPeriods(atree.ContinueCollectInstantPeriodsReq{
|
||||
err := s.atree.ContinueRangeScan(atree.ContinueRangeScanReq{
|
||||
MetricType: req.MetricType,
|
||||
FracDigits: result.FracDigits,
|
||||
Aggregator: aggregator,
|
||||
ResponseWriter: responseWriter,
|
||||
LastPageNo: result.PageNo,
|
||||
Since: since,
|
||||
Until: until,
|
||||
ResponseWriter: req.ResponseWriter,
|
||||
LastPageNo: result.LastPageNo,
|
||||
Since: req.Since,
|
||||
})
|
||||
s.metricRUnlock(req.MetricID)
|
||||
|
||||
if err != nil {
|
||||
reply(conn, proto.ErrUnexpected)
|
||||
reply(req.Conn, proto.ErrUnexpected)
|
||||
} else {
|
||||
responseWriter.Close()
|
||||
req.ResponseWriter.Close()
|
||||
}
|
||||
|
||||
case UntilNotFound:
|
||||
err := s.atree.FindInstantPeriods(atree.FindInstantPeriodsReq{
|
||||
err := s.atree.RangeScan(atree.RangeScanReq{
|
||||
MetricType: req.MetricType,
|
||||
FracDigits: result.FracDigits,
|
||||
ResponseWriter: responseWriter,
|
||||
RootPageNo: result.PageNo,
|
||||
Since: since,
|
||||
Until: until,
|
||||
GroupBy: req.GroupBy,
|
||||
FirstHourOfDay: req.FirstHourOfDay,
|
||||
ResponseWriter: req.ResponseWriter,
|
||||
RootPageNo: result.RootPageNo,
|
||||
Since: req.Since,
|
||||
Until: req.Until,
|
||||
})
|
||||
s.metricRUnlock(req.MetricID)
|
||||
|
||||
if err != nil {
|
||||
reply(conn, proto.ErrUnexpected)
|
||||
reply(req.Conn, proto.ErrUnexpected)
|
||||
} else {
|
||||
responseWriter.Close()
|
||||
req.ResponseWriter.Close()
|
||||
}
|
||||
|
||||
case NoMetric:
|
||||
reply(conn, proto.ErrNoMetric)
|
||||
reply(req.Conn, proto.ErrNoMetric)
|
||||
|
||||
case WrongMetricType:
|
||||
reply(conn, proto.ErrWrongMetricType)
|
||||
reply(req.Conn, proto.ErrWrongMetricType)
|
||||
|
||||
default:
|
||||
diploma.Abort(diploma.WrongResultCodeBug, ErrWrongResultCodeBug)
|
||||
@@ -969,45 +831,20 @@ func (s *Database) ListInstantPeriods(conn net.Conn, req proto.ListInstantPeriod
|
||||
return nil
|
||||
}
|
||||
|
||||
type cumulativePeriodsResult struct {
|
||||
ResultCode byte
|
||||
FracDigits byte
|
||||
PageNo uint32
|
||||
type fullScanReq struct {
|
||||
MetricID uint32
|
||||
MetricType diploma.MetricType
|
||||
Conn io.Writer
|
||||
ResponseWriter diploma.MeasureConsumer
|
||||
}
|
||||
|
||||
func (s *Database) ListCumulativePeriods(conn net.Conn, req proto.ListCumulativePeriodsReq) error {
|
||||
if req.Since > req.Until {
|
||||
reply(conn, proto.ErrInvalidRange)
|
||||
return nil
|
||||
}
|
||||
func (s *Database) fullScan(req fullScanReq) error {
|
||||
resultCh := make(chan fullScanResult, 1)
|
||||
|
||||
var (
|
||||
since = req.Since
|
||||
until = req.Until
|
||||
)
|
||||
if req.FirstHourOfDay > 0 {
|
||||
since, until = correctToFHD(since, until, req.FirstHourOfDay)
|
||||
}
|
||||
|
||||
resultCh := make(chan cumulativePeriodsResult, 1)
|
||||
|
||||
aggregator, err := atree.NewCumulativeAggregator(atree.CumulativeAggregatorOptions{
|
||||
GroupBy: req.GroupBy,
|
||||
FirstHourOfDay: req.FirstHourOfDay,
|
||||
})
|
||||
if err != nil {
|
||||
reply(conn, proto.ErrUnexpected)
|
||||
return nil
|
||||
}
|
||||
|
||||
responseWriter := atree.NewCumulativePeriodsWriter(conn)
|
||||
|
||||
s.appendJobToWorkerQueue(tryListCumulativePeriodsReq{
|
||||
s.appendJobToWorkerQueue(tryFullScanReq{
|
||||
MetricID: req.MetricID,
|
||||
Since: since,
|
||||
Until: until,
|
||||
Aggregator: aggregator,
|
||||
ResponseWriter: responseWriter,
|
||||
MetricType: req.MetricType,
|
||||
ResponseWriter: req.ResponseWriter,
|
||||
ResultCh: resultCh,
|
||||
})
|
||||
|
||||
@@ -1015,48 +852,27 @@ func (s *Database) ListCumulativePeriods(conn net.Conn, req proto.ListCumulative
|
||||
|
||||
switch result.ResultCode {
|
||||
case QueryDone:
|
||||
responseWriter.Close()
|
||||
req.ResponseWriter.Close()
|
||||
|
||||
case UntilFound:
|
||||
err := s.atree.ContinueCollectCumulativePeriods(atree.ContinueCollectCumulativePeriodsReq{
|
||||
err := s.atree.ContinueFullScan(atree.ContinueFullScanReq{
|
||||
MetricType: req.MetricType,
|
||||
FracDigits: result.FracDigits,
|
||||
Aggregator: aggregator,
|
||||
ResponseWriter: responseWriter,
|
||||
LastPageNo: result.PageNo,
|
||||
Since: since,
|
||||
Until: until,
|
||||
ResponseWriter: req.ResponseWriter,
|
||||
LastPageNo: result.LastPageNo,
|
||||
})
|
||||
s.metricRUnlock(req.MetricID)
|
||||
|
||||
if err != nil {
|
||||
reply(conn, proto.ErrUnexpected)
|
||||
reply(req.Conn, proto.ErrUnexpected)
|
||||
} else {
|
||||
responseWriter.Close()
|
||||
}
|
||||
|
||||
case UntilNotFound:
|
||||
err := s.atree.FindCumulativePeriods(atree.FindCumulativePeriodsReq{
|
||||
FracDigits: result.FracDigits,
|
||||
ResponseWriter: responseWriter,
|
||||
RootPageNo: result.PageNo,
|
||||
Since: since,
|
||||
Until: until,
|
||||
GroupBy: req.GroupBy,
|
||||
FirstHourOfDay: req.FirstHourOfDay,
|
||||
})
|
||||
s.metricRUnlock(req.MetricID)
|
||||
|
||||
if err != nil {
|
||||
reply(conn, proto.ErrUnexpected)
|
||||
} else {
|
||||
responseWriter.Close()
|
||||
req.ResponseWriter.Close()
|
||||
}
|
||||
|
||||
case NoMetric:
|
||||
reply(conn, proto.ErrNoMetric)
|
||||
reply(req.Conn, proto.ErrNoMetric)
|
||||
|
||||
case WrongMetricType:
|
||||
reply(conn, proto.ErrWrongMetricType)
|
||||
reply(req.Conn, proto.ErrWrongMetricType)
|
||||
|
||||
default:
|
||||
diploma.Abort(diploma.WrongResultCodeBug, ErrWrongResultCodeBug)
|
||||
@@ -1065,7 +881,7 @@ func (s *Database) ListCumulativePeriods(conn net.Conn, req proto.ListCumulative
|
||||
}
|
||||
|
||||
func (s *Database) ListCurrentValues(conn net.Conn, req proto.ListCurrentValuesReq) error {
|
||||
responseWriter := atree.NewCurrentValueWriter(conn)
|
||||
responseWriter := transform.NewCurrentValueWriter(conn)
|
||||
defer responseWriter.Close()
|
||||
|
||||
resultCh := make(chan struct{})
|
||||
|
||||
@@ -5,8 +5,32 @@ import (
|
||||
"io/fs"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"gordenko.dev/dima/diploma"
|
||||
"gordenko.dev/dima/diploma/proto"
|
||||
)
|
||||
|
||||
func timeBoundsOfAggregation(since, until proto.TimeBound, groupBy diploma.GroupBy, firstHourOfDay int) (s time.Time, u time.Time) {
|
||||
switch groupBy {
|
||||
case diploma.GroupByHour, diploma.GroupByDay:
|
||||
s = time.Date(since.Year, since.Month, since.Day, 0, 0, 0, 0, time.Local)
|
||||
u = time.Date(until.Year, until.Month, until.Day, 23, 59, 59, 0, time.Local)
|
||||
|
||||
case diploma.GroupByMonth:
|
||||
s = time.Date(since.Year, since.Month, 1, 0, 0, 0, 0, time.Local)
|
||||
u = time.Date(until.Year, until.Month, 1, 23, 59, 59, 0, time.Local)
|
||||
u = u.AddDate(0, 1, 0)
|
||||
u = u.AddDate(0, 0, -1)
|
||||
}
|
||||
|
||||
if firstHourOfDay > 0 {
|
||||
duration := time.Duration(firstHourOfDay) * time.Hour
|
||||
s = s.Add(duration)
|
||||
u = u.Add(duration)
|
||||
}
|
||||
return
|
||||
}
|
||||
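A quick worked example of the bounds this helper produces, written as a test-style sketch (illustrative only; it assumes the enclosing package is named database and uses nothing beyond the helper defined above):

package database

import (
	"testing"
	"time"

	"gordenko.dev/dima/diploma"
	"gordenko.dev/dima/diploma/proto"
)

// Illustrative sketch, not part of this commit: shows the bounds produced by
// timeBoundsOfAggregation for GroupByMonth with FirstHourOfDay = 6.
func TestTimeBoundsOfAggregationMonthWithFHD(t *testing.T) {
	since := proto.TimeBound{Year: 2024, Month: time.March, Day: 15}
	until := proto.TimeBound{Year: 2024, Month: time.April, Day: 2}

	s, u := timeBoundsOfAggregation(since, until, diploma.GroupByMonth, 6)

	// s is 2024-03-01 06:00:00 local: the first second of the "since" month plus 6 hours.
	// u is 2024-05-01 05:59:59 local: the last second of the "until" month plus 6 hours.
	if s.Day() != 1 || s.Hour() != 6 {
		t.Fatalf("unexpected since bound: %s", s)
	}
	if u.Hour() != 5 || u.Minute() != 59 || u.Second() != 59 {
		t.Fatalf("unexpected until bound: %s", u)
	}
}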
|
||||
func isFileExist(fileName string) (bool, error) {
|
||||
_, err := os.Stat(fileName)
|
||||
if err != nil {
|
||||
@@ -41,10 +65,3 @@ func (s *Database) metricRUnlock(metricID uint32) {
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func correctToFHD(since, until uint32, firstHourOfDay int) (uint32, uint32) {
|
||||
duration := time.Duration(firstHourOfDay) * time.Hour
|
||||
since = uint32(time.Unix(int64(since), 0).Add(duration).Unix())
|
||||
until = uint32(time.Unix(int64(until), 0).Add(duration).Unix())
|
||||
return since, until
|
||||
}
|
||||
|
||||
1011 database/proc.go (file diff suppressed because it is too large)
BIN database_linux (binary file not shown)
BIN database_windows (binary file not shown)
14 diploma.go
@@ -46,6 +46,20 @@ type ValueDecompressor interface {
	NextValue() (float64, bool)
}

type MeasureConsumer interface {
	Feed(uint32, float64)
	FeedNoSend(uint32, float64)
	Close() error
}

type WorkerMeasureConsumer interface {
	FeedNoSend(uint32, float64)
}

type AtreeMeasureConsumer interface {
	Feed(uint32, float64)
}

type AbortCode int

const (
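To make the new contract concrete, a minimal in-memory MeasureConsumer is sketched below. It is a hypothetical test double, not a type from this commit: Feed and FeedNoSend record the measure, Close is a no-op.

package diploma_test

import (
	"gordenko.dev/dima/diploma"
)

type capturedMeasure struct {
	Timestamp uint32
	Value     float64
}

// captureConsumer collects everything fed to it; handy as a stub in tests.
type captureConsumer struct {
	measures []capturedMeasure
}

func (c *captureConsumer) Feed(timestamp uint32, value float64) {
	c.measures = append(c.measures, capturedMeasure{Timestamp: timestamp, Value: value})
}

func (c *captureConsumer) FeedNoSend(timestamp uint32, value float64) {
	c.measures = append(c.measures, capturedMeasure{Timestamp: timestamp, Value: value})
}

func (c *captureConsumer) Close() error { return nil }

// Compile-time check that the stub satisfies the interface.
var _ diploma.MeasureConsumer = (*captureConsumer)(nil)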
@@ -333,11 +333,22 @@ func execQuery(conn *client.Connection, queryGenerator *RandomQueryGenerator, st
|
||||
}
|
||||
|
||||
case listInstantPeriods:
|
||||
since := time.Unix(int64(recipe.Since), 0)
|
||||
until := time.Unix(int64(recipe.Until), 0)
|
||||
//
|
||||
t1 := time.Now()
|
||||
_, err := conn.ListInstantPeriods(proto.ListInstantPeriodsReq{
|
||||
MetricID: recipe.MetricID,
|
||||
Since: recipe.Since,
|
||||
Until: recipe.Until,
|
||||
MetricID: recipe.MetricID,
|
||||
Since: proto.TimeBound{
|
||||
Year: since.Year(),
|
||||
Month: since.Month(),
|
||||
Day: since.Day(),
|
||||
},
|
||||
Until: proto.TimeBound{
|
||||
Year: until.Year(),
|
||||
Month: until.Month(),
|
||||
Day: until.Day(),
|
||||
},
|
||||
GroupBy: recipe.GroupBy,
|
||||
AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
|
||||
})
|
||||
@@ -354,12 +365,23 @@ func execQuery(conn *client.Connection, queryGenerator *RandomQueryGenerator, st
|
||||
}
|
||||
|
||||
case listCumulativePeriods:
|
||||
since := time.Unix(int64(recipe.Since), 0)
|
||||
until := time.Unix(int64(recipe.Until), 0)
|
||||
//
|
||||
t1 := time.Now()
|
||||
_, err := conn.ListCumulativePeriods(proto.ListCumulativePeriodsReq{
|
||||
MetricID: recipe.MetricID,
|
||||
Since: recipe.Since,
|
||||
Until: recipe.Until,
|
||||
GroupBy: recipe.GroupBy,
|
||||
Since: proto.TimeBound{
|
||||
Year: since.Year(),
|
||||
Month: since.Month(),
|
||||
Day: since.Day(),
|
||||
},
|
||||
Until: proto.TimeBound{
|
||||
Year: until.Year(),
|
||||
Month: until.Month(),
|
||||
Day: until.Day(),
|
||||
},
|
||||
GroupBy: recipe.GroupBy,
|
||||
})
|
||||
elapsedTime = time.Since(t1)
|
||||
stat.ElapsedTime += elapsedTime
|
||||
|
||||
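The same three-field conversion from time.Time to proto.TimeBound is repeated at every call site above; a small helper (hypothetical, not part of this commit, relying only on the file's existing imports) would keep those literals short:

// toTimeBound converts a time.Time to the date-only proto.TimeBound used by
// the periods requests. Illustrative helper only.
func toTimeBound(t time.Time) proto.TimeBound {
	return proto.TimeBound{
		Year:  t.Year(),
		Month: t.Month(),
		Day:   t.Day(),
	}
}

// At a call site:
//   Since: toTimeBound(since),
//   Until: toTimeBound(until),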
@@ -4,12 +4,12 @@ import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"gordenko.dev/dima/diploma/client"
|
||||
"gordenko.dev/dima/diploma/proto"
|
||||
)
|
||||
|
||||
func GenerateCumulativeMeasures(days int) []client.Measure {
|
||||
func GenerateCumulativeMeasures(days int) []proto.Measure {
|
||||
var (
|
||||
measures []client.Measure
|
||||
measures []proto.Measure
|
||||
minutes = []int{14, 29, 44, 59}
|
||||
hoursPerDay = 24
|
||||
totalHours = days * hoursPerDay
|
||||
@@ -31,7 +31,7 @@ func GenerateCumulativeMeasures(days int) []client.Measure {
|
||||
time.Local,
|
||||
)
|
||||
|
||||
measure := client.Measure{
|
||||
measure := proto.Measure{
|
||||
Timestamp: uint32(measureTime.Unix()),
|
||||
Value: totalValue,
|
||||
}
|
||||
@@ -43,9 +43,9 @@ func GenerateCumulativeMeasures(days int) []client.Measure {
|
||||
return measures
|
||||
}
|
||||
|
||||
func GenerateInstantMeasures(days int, baseValue float64) []client.Measure {
|
||||
func GenerateInstantMeasures(days int, baseValue float64) []proto.Measure {
|
||||
var (
|
||||
measures []client.Measure
|
||||
measures []proto.Measure
|
||||
minutes = []int{14, 29, 44, 59}
|
||||
hoursPerDay = 24
|
||||
totalHours = days * hoursPerDay
|
||||
@@ -70,7 +70,7 @@ func GenerateInstantMeasures(days int, baseValue float64) []client.Measure {
|
||||
fluctuation := baseValue * 0.1
|
||||
value := baseValue + (rand.Float64()*2-1)*fluctuation
|
||||
|
||||
measure := client.Measure{
|
||||
measure := proto.Measure{
|
||||
Timestamp: uint32(measureTime.Unix()),
|
||||
Value: value,
|
||||
}
|
||||
|
||||
@@ -14,8 +14,10 @@ func sendRequests(conn *client.Connection) {
|
||||
var (
|
||||
instantMetricID uint32 = 10000
|
||||
cumulativeMetricID uint32 = 10001
|
||||
fracDigits byte = 2
|
||||
fracDigits int = 2
|
||||
err error
|
||||
|
||||
seriesInDays = 62
|
||||
)
|
||||
|
||||
conn.DeleteMetric(instantMetricID)
|
||||
@@ -23,7 +25,7 @@ func sendRequests(conn *client.Connection) {
|
||||
|
||||
// ADD INSTANT METRIC
|
||||
|
||||
err = conn.AddMetric(client.Metric{
|
||||
err = conn.AddMetric(proto.AddMetricReq{
|
||||
MetricID: instantMetricID,
|
||||
MetricType: diploma.Instant,
|
||||
FracDigits: fracDigits,
|
||||
@@ -51,9 +53,9 @@ GetMetric:
|
||||
|
||||
// APPEND MEASURES
|
||||
|
||||
instantMeasures := GenerateInstantMeasures(62, 220)
|
||||
instantMeasures := GenerateInstantMeasures(seriesInDays, 220)
|
||||
|
||||
err = conn.AppendMeasures(client.AppendMeasuresReq{
|
||||
err = conn.AppendMeasures(proto.AppendMeasuresReq{
|
||||
MetricID: instantMetricID,
|
||||
Measures: instantMeasures,
|
||||
})
|
||||
@@ -78,10 +80,10 @@ GetMetric:
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListInstantMeasures: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListInstantMeasures %s - %s:\n",
|
||||
formatTime(uint32(since.Unix())), formatTime(uint32(until.Unix())))
|
||||
fmt.Printf("\nListInstantMeasures %s − %s:\n",
|
||||
formatTime(since), formatTime(until))
|
||||
for _, item := range instantList {
|
||||
fmt.Printf(" %s => %.2f\n", formatTime(item.Timestamp), item.Value)
|
||||
fmt.Printf(" %s => %.2f\n", formatTimestamp(item.Timestamp), item.Value)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -93,70 +95,102 @@ GetMetric:
|
||||
} else {
|
||||
fmt.Printf("\nListAllInstantMeasures (last 15 items):\n")
|
||||
for _, item := range instantList[:15] {
|
||||
fmt.Printf(" %s => %.2f\n", formatTime(item.Timestamp), item.Value)
|
||||
fmt.Printf(" %s => %.2f\n", formatTimestamp(item.Timestamp), item.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST INSTANT PERIODS (group by hour)
|
||||
|
||||
until = time.Unix(int64(lastTimestamp+1), 0)
|
||||
until = time.Unix(int64(lastTimestamp), 0)
|
||||
since = until.Add(-24 * time.Hour)
|
||||
|
||||
instantPeriods, err := conn.ListInstantPeriods(proto.ListInstantPeriodsReq{
|
||||
MetricID: instantMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
MetricID: instantMetricID,
|
||||
Since: proto.TimeBound{
|
||||
Year: since.Year(),
|
||||
Month: since.Month(),
|
||||
Day: since.Day(),
|
||||
},
|
||||
Until: proto.TimeBound{
|
||||
Year: until.Year(),
|
||||
Month: until.Month(),
|
||||
Day: until.Day(),
|
||||
},
|
||||
GroupBy: diploma.GroupByHour,
|
||||
AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListInstantPeriods: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListInstantPeriods (1 day, group by hour):\n")
|
||||
fmt.Printf("\nListInstantPeriods (%s − %s, group by day):\n",
|
||||
formatDate(since), formatDate(until))
|
||||
|
||||
for _, item := range instantPeriods {
|
||||
fmt.Printf(" %s => min %.2f, max %.2f, avg %.2f\n", formatHourPeriod(item.Period), item.Min, item.Max, item.Avg)
|
||||
fmt.Printf(" %s => min %.2f, max %.2f, avg %.2f\n",
|
||||
formatHourPeriod(item.Period), item.Min, item.Max, item.Avg)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST INSTANT PERIODS (group by day)
|
||||
|
||||
until = time.Unix(int64(lastTimestamp+1), 0)
|
||||
since = until.AddDate(0, 0, -7)
|
||||
until = time.Unix(int64(lastTimestamp), 0)
|
||||
since = until.AddDate(0, 0, -6)
|
||||
|
||||
instantPeriods, err = conn.ListInstantPeriods(proto.ListInstantPeriodsReq{
|
||||
MetricID: instantMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
MetricID: instantMetricID,
|
||||
Since: proto.TimeBound{
|
||||
Year: since.Year(),
|
||||
Month: since.Month(),
|
||||
Day: since.Day(),
|
||||
},
|
||||
Until: proto.TimeBound{
|
||||
Year: until.Year(),
|
||||
Month: until.Month(),
|
||||
Day: until.Day(),
|
||||
},
|
||||
GroupBy: diploma.GroupByDay,
|
||||
AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListInstantPeriods: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListInstantPeriods (7 days, group by day):\n")
|
||||
fmt.Printf("\nListInstantPeriods (%s − %s, group by day):\n",
|
||||
formatDate(since), formatDate(until))
|
||||
|
||||
for _, item := range instantPeriods {
|
||||
fmt.Printf(" %s => min %.2f, max %.2f, avg %.2f\n", formatDayPeriod(item.Period), item.Min, item.Max, item.Avg)
|
||||
fmt.Printf(" %s => min %.2f, max %.2f, avg %.2f\n",
|
||||
formatDayPeriod(item.Period), item.Min, item.Max, item.Avg)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST INSTANT PERIODS (group by month)
|
||||
|
||||
until = time.Unix(int64(lastTimestamp+1), 0)
|
||||
since = until.AddDate(0, 0, -62)
|
||||
until = time.Unix(int64(lastTimestamp), 0)
|
||||
since = until.AddDate(0, 0, -seriesInDays)
|
||||
|
||||
instantPeriods, err = conn.ListInstantPeriods(proto.ListInstantPeriodsReq{
|
||||
MetricID: instantMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
MetricID: instantMetricID,
|
||||
Since: proto.TimeBound{
|
||||
Year: since.Year(),
|
||||
Month: since.Month(),
|
||||
Day: since.Day(),
|
||||
},
|
||||
Until: proto.TimeBound{
|
||||
Year: until.Year(),
|
||||
Month: until.Month(),
|
||||
Day: until.Day(),
|
||||
},
|
||||
GroupBy: diploma.GroupByMonth,
|
||||
AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListInstantPeriods: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListInstantPeriods (62 days, group by month):\n")
|
||||
fmt.Printf("\nListInstantPeriods (%s − %s, group by day):\n",
|
||||
formatMonth(since), formatMonth(until))
|
||||
for _, item := range instantPeriods {
|
||||
fmt.Printf(" %s => min %.2f, max %.2f, avg %.2f\n", formatMonthPeriod(item.Period), item.Min, item.Max, item.Avg)
|
||||
fmt.Printf(" %s => min %.2f, max %.2f, avg %.2f\n",
|
||||
formatMonthPeriod(item.Period), item.Min, item.Max, item.Avg)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -182,7 +216,7 @@ GetMetric:
|
||||
|
||||
// ADD CUMULATIVE METRIC
|
||||
|
||||
err = conn.AddMetric(client.Metric{
|
||||
err = conn.AddMetric(proto.AddMetricReq{
|
||||
MetricID: cumulativeMetricID,
|
||||
MetricType: diploma.Cumulative,
|
||||
FracDigits: fracDigits,
|
||||
@@ -210,9 +244,9 @@ GetMetric:
|
||||
|
||||
// APPEND MEASURES
|
||||
|
||||
cumulativeMeasures := GenerateCumulativeMeasures(62)
|
||||
cumulativeMeasures := GenerateCumulativeMeasures(seriesInDays)
|
||||
|
||||
err = conn.AppendMeasures(client.AppendMeasuresReq{
|
||||
err = conn.AppendMeasures(proto.AppendMeasuresReq{
|
||||
MetricID: cumulativeMetricID,
|
||||
Measures: cumulativeMeasures,
|
||||
})
|
||||
@@ -237,11 +271,12 @@ GetMetric:
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListCumulativeMeasures: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListCumulativeMeasures %s - %s:\n",
|
||||
formatTime(uint32(since.Unix())), formatTime(uint32(until.Unix())))
|
||||
fmt.Printf("\nListCumulativeMeasures %s − %s:\n",
|
||||
formatTime(since), formatTime(until))
|
||||
|
||||
for _, item := range cumulativeList {
|
||||
fmt.Printf(" %s => %.2f\n", formatTime(item.Timestamp), item.Value)
|
||||
fmt.Printf(" %s => %.2f (%.2f)\n",
|
||||
formatTimestamp(item.Timestamp), item.Value, item.Total)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -253,67 +288,101 @@ GetMetric:
|
||||
} else {
|
||||
fmt.Printf("\nListAllCumulativeMeasures (last 15 items):\n")
|
||||
for _, item := range cumulativeList[:15] {
|
||||
fmt.Printf(" %s => %.2f\n", formatTime(item.Timestamp), item.Value)
|
||||
fmt.Printf(" %s => %.2f (%.2f)\n",
|
||||
formatTimestamp(item.Timestamp), item.Value, item.Total)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST CUMULATIVE PERIODS (group by hour)
|
||||
// LIST CUMULATIVE PERIODS (group by hour)
|
||||
|
||||
until = time.Unix(int64(lastTimestamp+1), 0)
|
||||
until = time.Unix(int64(lastTimestamp), 0)
|
||||
since = until.Add(-24 * time.Hour)
|
||||
|
||||
cumulativePeriods, err := conn.ListCumulativePeriods(proto.ListCumulativePeriodsReq{
|
||||
MetricID: cumulativeMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
GroupBy: diploma.GroupByHour,
|
||||
Since: proto.TimeBound{
|
||||
Year: since.Year(),
|
||||
Month: since.Month(),
|
||||
Day: since.Day(),
|
||||
},
|
||||
Until: proto.TimeBound{
|
||||
Year: until.Year(),
|
||||
Month: until.Month(),
|
||||
Day: until.Day(),
|
||||
},
|
||||
GroupBy: diploma.GroupByHour,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListCumulativePeriods: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListCumulativePeriods (1 day, group by hour):\n")
|
||||
fmt.Printf("\nListCumulativePeriods (%s − %s, group by hour):\n",
|
||||
formatDate(since), formatDate(until))
|
||||
|
||||
for _, item := range cumulativePeriods {
|
||||
fmt.Printf(" %s => end value %.2f, total %.2f\n", formatHourPeriod(item.Period), item.EndValue, item.Total)
|
||||
fmt.Printf(" %s => %.2f (%.2f)\n",
|
||||
formatHourPeriod(item.Period), item.EndValue, item.EndValue-item.StartValue)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST CUMULATIVE PERIODS (group by day)
|
||||
|
||||
until = time.Unix(int64(lastTimestamp+1), 0)
|
||||
since = until.AddDate(0, 0, -7)
|
||||
until = time.Unix(int64(lastTimestamp), 0)
|
||||
since = until.AddDate(0, 0, -6)
|
||||
|
||||
cumulativePeriods, err = conn.ListCumulativePeriods(proto.ListCumulativePeriodsReq{
|
||||
MetricID: cumulativeMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
GroupBy: diploma.GroupByDay,
|
||||
Since: proto.TimeBound{
|
||||
Year: since.Year(),
|
||||
Month: since.Month(),
|
||||
Day: since.Day(),
|
||||
},
|
||||
Until: proto.TimeBound{
|
||||
Year: until.Year(),
|
||||
Month: until.Month(),
|
||||
Day: until.Day(),
|
||||
},
|
||||
GroupBy: diploma.GroupByDay,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListCumulativePeriods: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListCumulativePeriods (7 days, group by day):\n")
|
||||
fmt.Printf("\nListCumulativePeriods (%s − %s, group by day):\n",
|
||||
formatDate(since), formatDate(until))
|
||||
|
||||
for _, item := range cumulativePeriods {
|
||||
fmt.Printf(" %s => end value %.2f, total %.2f\n", formatDayPeriod(item.Period), item.EndValue, item.Total)
|
||||
fmt.Printf(" %s => %.2f (%.2f)\n",
|
||||
formatDayPeriod(item.Period), item.EndValue, item.EndValue-item.StartValue)
|
||||
}
|
||||
}
|
||||
|
||||
// LIST CUMULATIVE PERIODS (group by month)
|
||||
|
||||
until = time.Unix(int64(lastTimestamp+1), 0)
|
||||
since = until.AddDate(0, 0, -62)
|
||||
until = time.Unix(int64(lastTimestamp), 0)
|
||||
since = until.AddDate(0, 0, -seriesInDays)
|
||||
|
||||
cumulativePeriods, err = conn.ListCumulativePeriods(proto.ListCumulativePeriodsReq{
|
||||
MetricID: cumulativeMetricID,
|
||||
Since: uint32(since.Unix()),
|
||||
Until: uint32(until.Unix()),
|
||||
GroupBy: diploma.GroupByMonth,
|
||||
Since: proto.TimeBound{
|
||||
Year: since.Year(),
|
||||
Month: since.Month(),
|
||||
Day: since.Day(),
|
||||
},
|
||||
Until: proto.TimeBound{
|
||||
Year: until.Year(),
|
||||
Month: until.Month(),
|
||||
Day: until.Day(),
|
||||
},
|
||||
GroupBy: diploma.GroupByMonth,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("conn.ListCumulativePeriods: %s\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nListCumulativePeriods (62 days, group by month):\n")
|
||||
fmt.Printf("\nListCumulativePeriods (%s − %s, group by month):\n",
|
||||
formatMonth(since), formatMonth(until))
|
||||
|
||||
for _, item := range cumulativePeriods {
|
||||
fmt.Printf(" %s => end value %.2f, total %.2f\n", formatMonthPeriod(item.Period), item.EndValue, item.Total)
|
||||
fmt.Printf(" %s => %.2f (%.2f)\n",
|
||||
formatMonthPeriod(item.Period), item.EndValue, item.EndValue-item.StartValue)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -338,11 +407,21 @@ GetMetric:
|
||||
}
|
||||
}
|
||||
|
||||
const datetimeLayout = "2006-01-02 15:04:05"
|
||||
func formatMonth(tm time.Time) string {
|
||||
return tm.Format("2006-01")
|
||||
}
|
||||
|
||||
func formatTime(timestamp uint32) string {
|
||||
func formatDate(tm time.Time) string {
|
||||
return tm.Format("2006-01-02")
|
||||
}
|
||||
|
||||
func formatTime(tm time.Time) string {
|
||||
return tm.Format("2006-01-02 15:04:05")
|
||||
}
|
||||
|
||||
func formatTimestamp(timestamp uint32) string {
|
||||
tm := time.Unix(int64(timestamp), 0)
|
||||
return tm.Format(datetimeLayout)
|
||||
return tm.Format("2006-01-02 15:04:05")
|
||||
}
|
||||
|
||||
func formatHourPeriod(period uint32) string {
|
||||
|
||||
1 go.mod
@@ -5,6 +5,7 @@ go 1.24.0
|
||||
require (
|
||||
github.com/RoaringBitmap/roaring/v2 v2.5.0
|
||||
gopkg.in/ini.v1 v1.67.0
|
||||
gordenko.dev/dima/pretty v0.0.0-20221225212746-0c27d8c0ac69
|
||||
)
|
||||
|
||||
require (
|
||||
|
||||
2 go.sum
@@ -18,3 +18,5 @@ gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
|
||||
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gordenko.dev/dima/pretty v0.0.0-20221225212746-0c27d8c0ac69 h1:nyJ3mzTQ46yUeMZCdLyYcs7B5JCS54c67v84miyhq2E=
|
||||
gordenko.dev/dima/pretty v0.0.0-20221225212746-0c27d8c0ac69/go.mod h1:AxgKDktpqBVyIOhIcP+nlCpK+EsJyjN5kPdqyd8euVU=
|
||||
|
||||
BIN loadtest_linux (binary file not shown)
BIN loadtest_windows (binary file not shown)
623 proto/proto.go
@@ -2,24 +2,26 @@ package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
octopus "gordenko.dev/dima/diploma"
|
||||
"gordenko.dev/dima/diploma"
|
||||
"gordenko.dev/dima/diploma/bin"
|
||||
"gordenko.dev/dima/diploma/bufreader"
|
||||
)
|
||||
|
||||
const (
|
||||
TypeDeleteMeasures byte = 1
|
||||
TypeListCurrentValues byte = 2
|
||||
TypeListInstantMeasures byte = 3
|
||||
TypeListCumulativeMeasures byte = 33
|
||||
TypeListInstantPeriods byte = 4
|
||||
TypeListCumulativePeriods byte = 44
|
||||
TypeGetMetric byte = 5
|
||||
TypeAddMetric byte = 6
|
||||
TypeDeleteMeasures byte = 1
|
||||
TypeListCurrentValues byte = 2
|
||||
TypeListInstantMeasures byte = 3
|
||||
TypeListCumulativeMeasures byte = 33
|
||||
|
||||
TypeListInstantPeriods byte = 4
|
||||
TypeListCumulativePeriods byte = 44
|
||||
TypeGetMetric byte = 5
|
||||
TypeAddMetric byte = 6
|
||||
|
||||
TypeListAllInstantMeasures byte = 8
|
||||
TypeListAllCumulativeMeasures byte = 88
|
||||
TypeRangeTotal byte = 9
|
||||
TypeAppendMeasure byte = 10
|
||||
TypeAppendMeasures byte = 11
|
||||
TypeDeleteMetric byte = 12
|
||||
@@ -66,214 +68,65 @@ func ErrorCodeToText(code uint16) string {
|
||||
}
|
||||
}
|
||||
|
||||
type GetMetricReq struct {
|
||||
MetricID uint32
|
||||
}
|
||||
// common
|
||||
|
||||
type ListCurrentValuesReq struct {
|
||||
MetricIDs []uint32
|
||||
}
|
||||
|
||||
type AddMetricReq struct {
|
||||
type Metric struct {
|
||||
MetricID uint32
|
||||
MetricType octopus.MetricType
|
||||
MetricType diploma.MetricType
|
||||
FracDigits int
|
||||
}
|
||||
|
||||
type UpdateMetricReq struct {
|
||||
MetricID uint32
|
||||
MetricType octopus.MetricType
|
||||
FracDigits int
|
||||
type AppendError struct {
|
||||
MetricID uint32
|
||||
ErrorCode uint16
|
||||
}
|
||||
|
||||
type DeleteMetricReq struct {
|
||||
MetricID uint32
|
||||
type TimeBound struct {
|
||||
Year int
|
||||
Month time.Month
|
||||
Day int
|
||||
}
|
||||
|
||||
type DeleteMeasuresReq struct {
|
||||
MetricID uint32
|
||||
Since uint32 // timestamp (optional)
|
||||
type CumulativeMeasure struct {
|
||||
Timestamp uint32
|
||||
Value float64
|
||||
Total float64
|
||||
}
|
||||
|
||||
type AppendMeasureReq struct {
|
||||
type CumulativePeriod struct {
|
||||
Period uint32
|
||||
Start uint32
|
||||
End uint32
|
||||
StartValue float64
|
||||
EndValue float64
|
||||
}
|
||||
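The example client earlier in this diff prints per-period consumption as EndValue minus StartValue; a tiny helper over this struct captures that convention (illustrative only, not part of the commit):

// consumption returns how much the cumulative counter grew within the period.
func consumption(p proto.CumulativePeriod) float64 {
	return p.EndValue - p.StartValue
}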
|
||||
type InstantMeasure struct {
|
||||
Timestamp uint32
|
||||
Value float64
|
||||
}
|
||||
|
||||
type InstantPeriod struct {
|
||||
Period uint32
|
||||
Start uint32
|
||||
End uint32
|
||||
Min float64
|
||||
Max float64
|
||||
Avg float64
|
||||
}
|
||||
|
||||
type CurrentValue struct {
|
||||
MetricID uint32
|
||||
Timestamp uint32
|
||||
Value float64
|
||||
}
|
||||
|
||||
type ListAllInstantMetricMeasuresReq struct {
|
||||
// API reqs
|
||||
|
||||
type GetMetricReq struct {
|
||||
MetricID uint32
|
||||
}
|
||||
|
||||
type ListAllCumulativeMeasuresReq struct {
|
||||
MetricID uint32
|
||||
}
|
||||
|
||||
type ListInstantMeasuresReq struct {
|
||||
MetricID uint32
|
||||
Since uint32
|
||||
Until uint32
|
||||
FirstHourOfDay int
|
||||
}
|
||||
|
||||
type ListCumulativeMeasuresReq struct {
|
||||
MetricID uint32
|
||||
Since uint32
|
||||
Until uint32
|
||||
FirstHourOfDay int
|
||||
}
|
||||
|
||||
type ListInstantPeriodsReq struct {
|
||||
MetricID uint32
|
||||
Since uint32
|
||||
Until uint32
|
||||
GroupBy octopus.GroupBy
|
||||
AggregateFuncs byte
|
||||
FirstHourOfDay int
|
||||
}
|
||||
|
||||
type ListCumulativePeriodsReq struct {
|
||||
MetricID uint32
|
||||
Since uint32
|
||||
Until uint32
|
||||
GroupBy octopus.GroupBy
|
||||
FirstHourOfDay int
|
||||
}
|
||||
|
||||
type Metric struct {
|
||||
MetricID uint32
|
||||
MetricType octopus.MetricType
|
||||
FracDigits int
|
||||
}
|
||||
|
||||
type RangeTotalReq struct {
|
||||
MetricID uint32
|
||||
Since uint32
|
||||
Until uint32
|
||||
}
|
||||
|
||||
func PackAddMetricReq(req AddMetricReq) []byte {
|
||||
arr := []byte{
|
||||
TypeAddMetric,
|
||||
0, 0, 0, 0, //
|
||||
byte(req.MetricType),
|
||||
byte(req.FracDigits),
|
||||
}
|
||||
bin.PutUint32(arr[1:], req.MetricID)
|
||||
return arr
|
||||
}
|
||||
|
||||
func PackDeleteMetricReq(req DeleteMetricReq) []byte {
|
||||
arr := []byte{
|
||||
TypeDeleteMetric,
|
||||
0, 0, 0, 0, // metricID
|
||||
}
|
||||
bin.PutUint32(arr[1:], req.MetricID)
|
||||
return arr
|
||||
}
|
||||
|
||||
func PackAppendMeasure(req AppendMeasureReq) []byte {
|
||||
arr := []byte{
|
||||
TypeAppendMeasure,
|
||||
0, 0, 0, 0, // metricID
|
||||
0, 0, 0, 0, // timestamp
|
||||
0, 0, 0, 0, 0, 0, 0, 0, // value
|
||||
}
|
||||
bin.PutUint32(arr[1:], req.MetricID)
|
||||
bin.PutUint32(arr[5:], uint32(req.Timestamp))
|
||||
bin.PutFloat64(arr[9:], req.Value)
|
||||
return arr
|
||||
}
|
||||
|
||||
func PackDeleteMeasuresReq(req DeleteMeasuresReq) []byte {
|
||||
arr := []byte{
|
||||
TypeDeleteMeasures,
|
||||
0, 0, 0, 0, // metricID
|
||||
0, 0, 0, 0, // since
|
||||
}
|
||||
bin.PutUint32(arr[1:], req.MetricID)
|
||||
bin.PutUint32(arr[5:], uint32(req.Since))
|
||||
return arr
|
||||
}
|
||||
|
||||
// UNPACK reqs
|
||||
|
||||
func UnpackAddMetricReq(arr []byte) (m AddMetricReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
m.MetricType = octopus.MetricType(arr[4])
|
||||
m.FracDigits = int(arr[5])
|
||||
return
|
||||
}
|
||||
|
||||
func UnpackUpdateMetricReq(arr []byte) (m UpdateMetricReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
m.MetricType = octopus.MetricType(arr[4])
|
||||
m.FracDigits = int(arr[5])
|
||||
return
|
||||
}
|
||||
|
||||
func UnpackDeleteMetricReq(arr []byte) (m DeleteMetricReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
return
|
||||
}
|
||||
|
||||
func UnpackAppendMeasureReq(arr []byte) (m AppendMeasureReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
m.Timestamp = bin.GetUint32(arr[4:])
|
||||
m.Value = bin.GetFloat64(arr[8:])
|
||||
return
|
||||
}
|
||||
|
||||
func UnpackDeleteMeasuresReq(arr []byte) (m DeleteMeasuresReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
m.Since = bin.GetUint32(arr[4:])
|
||||
return
|
||||
}
|
||||
|
||||
func UnpackListInstantMeasuresReq(arr []byte) (m ListInstantMeasuresReq) {
|
||||
m.MetricID = bin.GetUint32(arr[0:])
|
||||
m.Since = bin.GetUint32(arr[4:])
|
||||
m.Until = bin.GetUint32(arr[8:])
|
||||
m.FirstHourOfDay = int(arr[12])
|
||||
return
|
||||
}
|
||||
|
||||
func UnpackListCumulativeMeasuresReq(arr []byte) (m ListCumulativeMeasuresReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
m.Since = bin.GetUint32(arr[4:])
|
||||
m.Until = bin.GetUint32(arr[8:])
|
||||
m.FirstHourOfDay = int(arr[12])
|
||||
return
|
||||
}
|
||||
|
||||
func UnpackListInstantPeriodsReq(arr []byte) (m ListInstantPeriodsReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
m.Since = bin.GetUint32(arr[4:])
|
||||
m.Until = bin.GetUint32(arr[8:])
|
||||
m.GroupBy = octopus.GroupBy(arr[12])
|
||||
m.AggregateFuncs = arr[13]
|
||||
m.FirstHourOfDay = int(arr[14])
|
||||
return
|
||||
}
|
||||
|
||||
func UnpackListCumulativePeriodsReq(arr []byte) (m ListCumulativePeriodsReq) {
|
||||
m.MetricID = bin.GetUint32(arr[0:])
|
||||
m.Since = bin.GetUint32(arr[4:])
|
||||
m.Until = bin.GetUint32(arr[8:])
|
||||
m.GroupBy = octopus.GroupBy(arr[12])
|
||||
m.FirstHourOfDay = int(arr[13])
|
||||
return
|
||||
}
|
||||
|
||||
func UnpackRangeTotalReq(arr []byte) (m RangeTotalReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
m.Since = bin.GetUint32(arr[4:])
|
||||
m.Until = bin.GetUint32(arr[8:])
|
||||
return
|
||||
}
|
||||
|
||||
// READ reqs
|
||||
|
||||
func ReadGetMetricReq(r *bufreader.BufferedReader) (m GetMetricReq, err error) {
|
||||
m.MetricID, err = bin.ReadUint32(r)
|
||||
if err != nil {
|
||||
@@ -283,112 +136,8 @@ func ReadGetMetricReq(r *bufreader.BufferedReader) (m GetMetricReq, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func ReadAddMetricReq(r *bufreader.BufferedReader) (m AddMetricReq, err error) {
|
||||
arr, err := r.ReadN(6)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackAddMetricReq(arr), nil
|
||||
}
|
||||
|
||||
func ReadUpdateMetricReq(r *bufreader.BufferedReader) (m UpdateMetricReq, err error) {
|
||||
arr, err := r.ReadN(6)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackUpdateMetricReq(arr), nil
|
||||
}
|
||||
|
||||
func ReadDeleteMetricReq(r *bufreader.BufferedReader) (m DeleteMetricReq, err error) {
|
||||
m.MetricID, err = bin.ReadUint32(r)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ReadAppendMeasureReq(r *bufreader.BufferedReader) (m AppendMeasureReq, err error) {
|
||||
arr, err := r.ReadN(16)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackAppendMeasureReq(arr), nil
|
||||
}
|
||||
|
||||
func ReadDeleteMeasuresReq(r *bufreader.BufferedReader) (m DeleteMeasuresReq, err error) {
|
||||
arr, err := r.ReadN(8)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackDeleteMeasuresReq(arr), nil
|
||||
}
|
||||
|
||||
func ReadListAllInstantMeasuresReq(r *bufreader.BufferedReader) (m ListAllInstantMetricMeasuresReq, err error) {
|
||||
m.MetricID, err = bin.ReadUint32(r)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ReadListAllCumulativeMeasuresReq(r *bufreader.BufferedReader) (m ListAllCumulativeMeasuresReq, err error) {
|
||||
m.MetricID, err = bin.ReadUint32(r)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ReadListInstantMeasuresReq(r *bufreader.BufferedReader) (m ListInstantMeasuresReq, err error) {
|
||||
arr, err := r.ReadN(13)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackListInstantMeasuresReq(arr), nil
|
||||
}
|
||||
|
||||
func ReadListCumulativeMeasuresReq(r *bufreader.BufferedReader) (m ListCumulativeMeasuresReq, err error) {
|
||||
arr, err := r.ReadN(13)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackListCumulativeMeasuresReq(arr), nil
|
||||
}
|
||||
|
||||
func ReadListInstantPeriodsReq(r *bufreader.BufferedReader) (m ListInstantPeriodsReq, err error) {
|
||||
arr, err := r.ReadN(15)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackListInstantPeriodsReq(arr), nil
|
||||
}
|
||||
|
||||
func ReadListCumulativePeriodsReq(r *bufreader.BufferedReader) (m ListCumulativePeriodsReq, err error) {
|
||||
arr, err := r.ReadN(14)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackListCumulativePeriodsReq(arr), nil
|
||||
}
|
||||
|
||||
func ReadRangeTotalReq(r *bufreader.BufferedReader) (m RangeTotalReq, err error) {
|
||||
arr, err := r.ReadN(12)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackRangeTotalReq(arr), nil
|
||||
type ListCurrentValuesReq struct {
|
||||
MetricIDs []uint32
|
||||
}
|
||||
|
||||
func ReadListCurrentValuesReq(r *bufreader.BufferedReader) (m ListCurrentValuesReq, err error) {
|
||||
@@ -410,6 +159,83 @@ func ReadListCurrentValuesReq(r *bufreader.BufferedReader) (m ListCurrentValuesR
|
||||
return
|
||||
}
|
||||
|
||||
type AddMetricReq struct {
|
||||
MetricID uint32
|
||||
MetricType diploma.MetricType
|
||||
FracDigits int
|
||||
}
|
||||
|
||||
func ReadAddMetricReq(r *bufreader.BufferedReader) (m AddMetricReq, err error) {
|
||||
arr, err := r.ReadN(6)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackAddMetricReq(arr), nil
|
||||
}
|
||||
|
||||
func UnpackAddMetricReq(arr []byte) (m AddMetricReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
m.MetricType = diploma.MetricType(arr[4])
|
||||
m.FracDigits = int(arr[5])
|
||||
return
|
||||
}
|
||||
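A round trip against the packer shown earlier in this file's diff illustrates the framing: PackAddMetricReq emits a 7-byte frame whose first byte is the request type, while this unpacker receives only the 6-byte payload after it. Sketch only; it assumes both functions stay exported after the reorganisation.

req := proto.AddMetricReq{
	MetricID:   10000,
	MetricType: diploma.Instant,
	FracDigits: 2,
}

arr := proto.PackAddMetricReq(req) // type byte + 6-byte payload
got := proto.UnpackAddMetricReq(arr[1:])

// got.MetricID == 10000, got.MetricType == diploma.Instant, got.FracDigits == 2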
|
||||
type DeleteMetricReq struct {
|
||||
MetricID uint32
|
||||
}
|
||||
|
||||
func ReadDeleteMetricReq(r *bufreader.BufferedReader) (m DeleteMetricReq, err error) {
|
||||
m.MetricID, err = bin.ReadUint32(r)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type DeleteMeasuresReq struct {
|
||||
MetricID uint32
|
||||
Since uint32 // timestamp (optional)
|
||||
}
|
||||
|
||||
func ReadDeleteMeasuresReq(r *bufreader.BufferedReader) (m DeleteMeasuresReq, err error) {
|
||||
arr, err := r.ReadN(8)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackDeleteMeasuresReq(arr), nil
|
||||
}
|
||||
|
||||
func UnpackDeleteMeasuresReq(arr []byte) (m DeleteMeasuresReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
m.Since = bin.GetUint32(arr[4:])
|
||||
return
|
||||
}
|
||||
|
||||
type AppendMeasureReq struct {
|
||||
MetricID uint32
|
||||
Timestamp uint32
|
||||
Value float64
|
||||
}
|
||||
|
||||
func ReadAppendMeasureReq(r *bufreader.BufferedReader) (m AppendMeasureReq, err error) {
|
||||
arr, err := r.ReadN(16)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackAppendMeasureReq(arr), nil
|
||||
}
|
||||
|
||||
func UnpackAppendMeasureReq(arr []byte) (m AppendMeasureReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
m.Timestamp = bin.GetUint32(arr[4:])
|
||||
m.Value = bin.GetFloat64(arr[8:])
|
||||
return
|
||||
}
|
||||
|
||||
type AppendMeasuresReq struct {
|
||||
MetricID uint32
|
||||
Measures []Measure
|
||||
@@ -420,27 +246,6 @@ type Measure struct {
|
||||
Value float64
|
||||
}
|
||||
|
||||
func PackAppendMeasures(req AppendMeasuresReq) []byte {
|
||||
if len(req.Measures) > 65535 {
|
||||
panic(fmt.Errorf("wrong measures qty: %d", len(req.Measures)))
|
||||
}
|
||||
var (
|
||||
prefixSize = 7
|
||||
recordSize = 12
|
||||
arr = make([]byte, prefixSize+len(req.Measures)*recordSize)
|
||||
)
|
||||
arr[0] = TypeAppendMeasures
|
||||
bin.PutUint32(arr[1:], req.MetricID)
|
||||
bin.PutUint16(arr[5:], uint16(len(req.Measures)))
|
||||
pos := prefixSize
|
||||
for _, measure := range req.Measures {
|
||||
bin.PutUint32(arr[pos:], measure.Timestamp)
|
||||
bin.PutFloat64(arr[pos+4:], measure.Value)
|
||||
pos += recordSize
|
||||
}
|
||||
return arr
|
||||
}
|
||||
|
||||
func ReadAppendMeasuresReq(r *bufreader.BufferedReader) (m AppendMeasuresReq, err error) {
|
||||
prefix, err := bin.ReadN(r, 6) // metricID + measures qty
|
||||
if err != nil {
|
||||
@@ -467,3 +272,169 @@ func ReadAppendMeasuresReq(r *bufreader.BufferedReader) (m AppendMeasuresReq, er
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type MetricMeasure struct {
|
||||
MetricID uint32
|
||||
Timestamp uint32
|
||||
Value float64
|
||||
}
|
||||
|
||||
func ReadAppendMeasurePerMetricReq(r *bufreader.BufferedReader) (measures []MetricMeasure, err error) {
|
||||
qty, err := bin.ReadUint16(r)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var tmp = make([]byte, 16)
|
||||
for range int(qty) {
|
||||
err = bin.ReadNInto(r, tmp)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
measures = append(measures, MetricMeasure{
|
||||
MetricID: bin.GetUint32(tmp[0:]),
|
||||
Timestamp: bin.GetUint32(tmp[4:]),
|
||||
Value: bin.GetFloat64(tmp[8:]),
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
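ReadAppendMeasurePerMetricReq expects a uint16 record count followed by 16-byte records (metric ID, timestamp, value). The request-type byte that precedes this payload is not visible in the diff, so the sketch below packs only the payload itself (hypothetical helper, assuming the bin functions used above):

// packMetricMeasuresPayload builds the body read by ReadAppendMeasurePerMetricReq.
func packMetricMeasuresPayload(measures []proto.MetricMeasure) []byte {
	arr := make([]byte, 2+len(measures)*16)
	bin.PutUint16(arr[0:], uint16(len(measures)))
	pos := 2
	for _, m := range measures {
		bin.PutUint32(arr[pos:], m.MetricID)
		bin.PutUint32(arr[pos+4:], m.Timestamp)
		bin.PutFloat64(arr[pos+8:], m.Value)
		pos += 16
	}
	return arr
}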
|
||||
type ListAllInstantMetricMeasuresReq struct {
|
||||
MetricID uint32
|
||||
}
|
||||
|
||||
func ReadListAllInstantMeasuresReq(r *bufreader.BufferedReader) (m ListAllInstantMetricMeasuresReq, err error) {
|
||||
m.MetricID, err = bin.ReadUint32(r)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type ListAllCumulativeMeasuresReq struct {
|
||||
MetricID uint32
|
||||
}
|
||||
|
||||
func ReadListAllCumulativeMeasuresReq(r *bufreader.BufferedReader) (m ListAllCumulativeMeasuresReq, err error) {
|
||||
m.MetricID, err = bin.ReadUint32(r)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type ListInstantMeasuresReq struct {
|
||||
MetricID uint32
|
||||
Since uint32
|
||||
Until uint32
|
||||
}
|
||||
|
||||
func ReadListInstantMeasuresReq(r *bufreader.BufferedReader) (m ListInstantMeasuresReq, err error) {
|
||||
arr, err := r.ReadN(12)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackListInstantMeasuresReq(arr), nil
|
||||
}
|
||||
|
||||
func UnpackListInstantMeasuresReq(arr []byte) (m ListInstantMeasuresReq) {
|
||||
m.MetricID = bin.GetUint32(arr[0:])
|
||||
m.Since = bin.GetUint32(arr[4:])
|
||||
m.Until = bin.GetUint32(arr[8:])
|
||||
return
|
||||
}
|
||||
|
||||
type ListCumulativeMeasuresReq struct {
|
||||
MetricID uint32
|
||||
Since uint32
|
||||
Until uint32
|
||||
}
|
||||
|
||||
func ReadListCumulativeMeasuresReq(r *bufreader.BufferedReader) (m ListCumulativeMeasuresReq, err error) {
|
||||
arr, err := r.ReadN(12)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackListCumulativeMeasuresReq(arr), nil
|
||||
}
|
||||
|
||||
func UnpackListCumulativeMeasuresReq(arr []byte) (m ListCumulativeMeasuresReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
m.Since = bin.GetUint32(arr[4:])
|
||||
m.Until = bin.GetUint32(arr[8:])
|
||||
return
|
||||
}
|
||||
|
||||
type ListInstantPeriodsReq struct {
|
||||
MetricID uint32
|
||||
Since TimeBound
|
||||
Until TimeBound
|
||||
GroupBy diploma.GroupBy
|
||||
AggregateFuncs byte
|
||||
FirstHourOfDay int
|
||||
}
|
||||
|
||||
func ReadListInstantPeriodsReq(r *bufreader.BufferedReader) (m ListInstantPeriodsReq, err error) {
|
||||
arr, err := r.ReadN(15)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackListInstantPeriodsReq(arr), nil
|
||||
}
|
||||
|
||||
func UnpackListInstantPeriodsReq(arr []byte) (m ListInstantPeriodsReq) {
|
||||
m.MetricID = bin.GetUint32(arr)
|
||||
m.Since = TimeBound{
|
||||
Year: int(bin.GetUint16(arr[4:])),
|
||||
Month: time.Month(arr[6]),
|
||||
Day: int(arr[7]),
|
||||
}
|
||||
m.Until = TimeBound{
|
||||
Year: int(bin.GetUint16(arr[8:])),
|
||||
Month: time.Month(arr[10]),
|
||||
Day: int(arr[11]),
|
||||
}
|
||||
m.GroupBy = diploma.GroupBy(arr[12])
|
||||
m.AggregateFuncs = arr[13]
|
||||
m.FirstHourOfDay = int(arr[14])
|
||||
return
|
||||
}
|
||||
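This unpacker fixes the request body at 15 bytes: metric ID (4), since as year/month/day (2+1+1), until (2+1+1), group-by (1), aggregate-funcs mask (1), first hour of day (1). A client-side packer mirroring that layout might look like the sketch below (illustrative; the real client packer is not shown in this diff, and the leading request-type byte is omitted):

func packListInstantPeriodsPayload(req proto.ListInstantPeriodsReq) []byte {
	arr := make([]byte, 15)
	bin.PutUint32(arr[0:], req.MetricID)
	bin.PutUint16(arr[4:], uint16(req.Since.Year))
	arr[6] = byte(req.Since.Month)
	arr[7] = byte(req.Since.Day)
	bin.PutUint16(arr[8:], uint16(req.Until.Year))
	arr[10] = byte(req.Until.Month)
	arr[11] = byte(req.Until.Day)
	arr[12] = byte(req.GroupBy)
	arr[13] = req.AggregateFuncs
	arr[14] = byte(req.FirstHourOfDay)
	return arr
}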
|
||||
type ListCumulativePeriodsReq struct {
|
||||
MetricID uint32
|
||||
Since TimeBound
|
||||
Until TimeBound
|
||||
GroupBy diploma.GroupBy
|
||||
FirstHourOfDay int
|
||||
}
|
||||
|
||||
func ReadListCumulativePeriodsReq(r *bufreader.BufferedReader) (m ListCumulativePeriodsReq, err error) {
|
||||
arr, err := r.ReadN(14)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("read req: %s", err)
|
||||
return
|
||||
}
|
||||
return UnpackListCumulativePeriodsReq(arr), nil
|
||||
}
|
||||
|
||||
func UnpackListCumulativePeriodsReq(arr []byte) (m ListCumulativePeriodsReq) {
|
||||
m.MetricID = bin.GetUint32(arr[0:])
|
||||
m.Since = TimeBound{
|
||||
Year: int(bin.GetUint16(arr[4:])),
|
||||
Month: time.Month(arr[6]),
|
||||
Day: int(arr[7]),
|
||||
}
|
||||
m.Until = TimeBound{
|
||||
Year: int(bin.GetUint16(arr[8:])),
|
||||
Month: time.Month(arr[10]),
|
||||
Day: int(arr[11]),
|
||||
}
|
||||
m.GroupBy = diploma.GroupBy(arr[12])
|
||||
m.FirstHourOfDay = int(arr[13])
|
||||
return
|
||||
}
|
||||
|
||||
BIN requests_linux (binary file not shown)
BIN requests_windows (binary file not shown)
372 transform/aggregate.go (new file)
@@ -0,0 +1,372 @@
package transform

import (
    "errors"
    "fmt"
    "io"
    "time"

    "gordenko.dev/dima/diploma"
    "gordenko.dev/dima/diploma/bin"
    "gordenko.dev/dima/diploma/timeutil"
)

// INSTANT

type InstantPeriodsWriterOptions struct {
    Dst            io.Writer
    GroupBy        diploma.GroupBy
    Since          uint32
    AggregateFuncs byte
    FirstHourOfDay int
}

// InstantPeriodsWriter groups instant measurements into hour/day/month periods
// and streams one fixed-size record per period through a ChunkedResponder.
// Measurements are fed newest-first; crossing a period boundary flushes the
// finished period and emits blank records for any empty periods in between.
type InstantPeriodsWriter struct {
    aggregateFuncs byte
    arr            []byte
    responder      *ChunkedResponder
    groupBy        diploma.GroupBy
    since          uint32
    firstHourOfDay int
    time2period    func(uint32) time.Time
    currentPeriod  time.Time
    lastTimestamp  uint32
    endTimestamp   uint32
    min            float64
    max            float64
    total          float64
    entries        int
}

func NewInstantPeriodsWriter(opt InstantPeriodsWriterOptions) (*InstantPeriodsWriter, error) {
    if opt.Dst == nil {
        return nil, errors.New("Dst option is required")
    }
    if opt.FirstHourOfDay < 0 || opt.FirstHourOfDay > 23 {
        return nil, fmt.Errorf("wrong FirstHourOfDay option: %d", opt.FirstHourOfDay)
    }

    // Count the requested aggregate functions to size the record buffer.
    var q int
    if (opt.AggregateFuncs & diploma.AggregateMin) == diploma.AggregateMin {
        q++
    }
    if (opt.AggregateFuncs & diploma.AggregateMax) == diploma.AggregateMax {
        q++
    }
    if (opt.AggregateFuncs & diploma.AggregateAvg) == diploma.AggregateAvg {
        q++
    }

    if q == 0 {
        return nil, errors.New("AggregateFuncs option is required")
    }

    s := &InstantPeriodsWriter{
        aggregateFuncs: opt.AggregateFuncs,
        arr:            make([]byte, 12+q*8),
        responder:      NewChunkedResponder(opt.Dst),
        groupBy:        opt.GroupBy,
        since:          opt.Since,
        firstHourOfDay: opt.FirstHourOfDay,
    }

    switch opt.GroupBy {
    case diploma.GroupByHour:
        s.time2period = groupByHour

    case diploma.GroupByDay:
        if s.firstHourOfDay > 0 {
            s.time2period = s.groupByDayUsingFHD
        } else {
            s.time2period = groupByDay
        }

    case diploma.GroupByMonth:
        if s.firstHourOfDay > 0 {
            s.time2period = s.groupByMonthUsingFHD
        } else {
            s.time2period = groupByMonth
        }

    default:
        return nil, fmt.Errorf("unknown groupBy %d option", opt.GroupBy)
    }
    return s, nil
}

func (s *InstantPeriodsWriter) groupByDayUsingFHD(timestamp uint32) time.Time {
    tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "d")
    if tm.Hour() < s.firstHourOfDay {
        tm = tm.AddDate(0, 0, -1)
    }
    return tm
}

func (s *InstantPeriodsWriter) groupByMonthUsingFHD(timestamp uint32) time.Time {
    tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
    if tm.Hour() < s.firstHourOfDay {
        tm = tm.AddDate(0, 0, -1)
    }
    return tm
}

func (s *InstantPeriodsWriter) Feed(timestamp uint32, value float64) {
    s.feed(timestamp, value, false)
}

func (s *InstantPeriodsWriter) FeedNoSend(timestamp uint32, value float64) {
    s.feed(timestamp, value, true)
}

func (s *InstantPeriodsWriter) feed(timestamp uint32, value float64, isBuffer bool) {
    if s.entries > 0 {
        period := s.time2period(timestamp)
        if period != s.currentPeriod {
            // Period boundary crossed: flush the finished period.
            s.packPeriod(timestamp)
            if isBuffer {
                s.responder.BufferRecord(s.arr)
            } else {
                s.responder.AppendRecord(s.arr)
            }
            s.decrementPeriod()
            // Emit blank records for any empty periods in between.
            for period.Before(s.currentPeriod) {
                s.packBlankPeriod()
                if isBuffer {
                    s.responder.BufferRecord(s.arr)
                } else {
                    s.responder.AppendRecord(s.arr)
                }
                s.decrementPeriod()
            }
            s.endTimestamp = timestamp
            s.min = value
            s.max = value
            s.total = value
            s.entries = 1
        } else {
            if value < s.min {
                s.min = value
            } else if value > s.max {
                s.max = value
            }
            s.total += value
            s.entries++
        }
    } else {
        s.endTimestamp = timestamp
        s.min = value
        s.max = value
        s.total = value
        s.entries = 1
        s.currentPeriod = s.time2period(timestamp)
    }
    s.lastTimestamp = timestamp
}

func (s *InstantPeriodsWriter) decrementPeriod() {
    switch s.groupBy {
    case diploma.GroupByHour:
        s.currentPeriod = s.currentPeriod.Add(-1 * time.Hour)
    case diploma.GroupByDay:
        s.currentPeriod = s.currentPeriod.AddDate(0, 0, -1)
    case diploma.GroupByMonth:
        s.currentPeriod = s.currentPeriod.AddDate(0, -1, 0)
    }
}

func (s *InstantPeriodsWriter) packBlankPeriod() {
    // Only the period timestamp is set; all aggregate fields are zeroed.
    bin.PutUint32(s.arr[0:], uint32(s.currentPeriod.Unix()))
    for i := 4; i < len(s.arr); i++ {
        s.arr[i] = 0
    }
}

func (s *InstantPeriodsWriter) Close() (err error) {
    if s.entries > 0 {
        if s.lastTimestamp >= s.since {
            s.packPeriod(s.lastTimestamp)
            s.responder.AppendRecord(s.arr)
        }
    }
    return s.responder.Flush()
}

func (s *InstantPeriodsWriter) packPeriod(timestamp uint32) {
    bin.PutUint32(s.arr[0:], uint32(s.currentPeriod.Unix()))
    bin.PutUint32(s.arr[4:], timestamp)
    bin.PutUint32(s.arr[8:], s.endTimestamp)

    pos := 12
    if (s.aggregateFuncs & diploma.AggregateMin) == diploma.AggregateMin {
        bin.PutFloat64(s.arr[pos:], s.min)
        pos += 8
    }
    if (s.aggregateFuncs & diploma.AggregateMax) == diploma.AggregateMax {
        bin.PutFloat64(s.arr[pos:], s.max)
        pos += 8
    }
    if (s.aggregateFuncs & diploma.AggregateAvg) == diploma.AggregateAvg {
        bin.PutFloat64(s.arr[pos:], s.total/float64(s.entries))
    }
}

// CUMULATIVE

type CumulativePeriodsWriter struct {
    arr            []byte
    responder      *ChunkedResponder
    since          uint32
    firstHourOfDay int
    currentPeriod  time.Time
    groupBy        diploma.GroupBy
    time2period    func(uint32) time.Time
    endTimestamp   uint32
    endValue       float64
    lastTimestamp  uint32
    lastValue      float64
}

type CumulativePeriodsWriterOptions struct {
    Dst            io.Writer
    GroupBy        diploma.GroupBy
    Since          uint32
    FirstHourOfDay int
}

func NewCumulativePeriodsWriter(opt CumulativePeriodsWriterOptions) (*CumulativePeriodsWriter, error) {
    if opt.Dst == nil {
        return nil, errors.New("Dst option is required")
    }
    if opt.FirstHourOfDay < 0 || opt.FirstHourOfDay > 23 {
        return nil, fmt.Errorf("wrong FirstHourOfDay option: %d", opt.FirstHourOfDay)
    }

    s := &CumulativePeriodsWriter{
        arr:            make([]byte, 28),
        responder:      NewChunkedResponder(opt.Dst),
        since:          opt.Since,
        firstHourOfDay: opt.FirstHourOfDay,
        groupBy:        opt.GroupBy,
    }

    // Default grouping; overridden by the switch below.
    s.time2period = func(timestamp uint32) time.Time {
        return timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "h")
    }

    switch opt.GroupBy {
    case diploma.GroupByHour:
        s.time2period = groupByHour

    case diploma.GroupByDay:
        if s.firstHourOfDay > 0 {
            s.time2period = s.groupByDayUsingFHD
        } else {
            s.time2period = groupByDay
        }

    case diploma.GroupByMonth:
        if s.firstHourOfDay > 0 {
            s.time2period = s.groupByMonthUsingFHD
        } else {
            s.time2period = groupByMonth
        }

    default:
        return nil, fmt.Errorf("unknown groupBy %d option", opt.GroupBy)
    }
    return s, nil
}

func (s *CumulativePeriodsWriter) groupByDayUsingFHD(timestamp uint32) time.Time {
    tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "d")
    if tm.Hour() < s.firstHourOfDay {
        tm = tm.AddDate(0, 0, -1)
    }
    return tm
}

func (s *CumulativePeriodsWriter) groupByMonthUsingFHD(timestamp uint32) time.Time {
    tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
    if tm.Hour() < s.firstHourOfDay {
        tm = tm.AddDate(0, 0, -1)
    }
    return tm
}

func (s *CumulativePeriodsWriter) Feed(timestamp uint32, value float64) {
    s.feed(timestamp, value, false)
}

func (s *CumulativePeriodsWriter) FeedNoSend(timestamp uint32, value float64) {
    s.feed(timestamp, value, true)
}

func (s *CumulativePeriodsWriter) feed(timestamp uint32, value float64, isBuffer bool) {
    if s.endTimestamp > 0 {
        period := s.time2period(timestamp)
        if period != s.currentPeriod {
            s.packPeriod(timestamp, value)
            if isBuffer {
                s.responder.BufferRecord(s.arr)
            } else {
                s.responder.AppendRecord(s.arr)
            }
            s.decrementPeriod()
            for period.Before(s.currentPeriod) {
                // Emit a blank record for an empty period.
                s.packBlankPeriod()
                if isBuffer {
                    s.responder.BufferRecord(s.arr)
                } else {
                    s.responder.AppendRecord(s.arr)
                }
                s.decrementPeriod()
            }
            s.endTimestamp = timestamp
            s.endValue = value
        }
    } else {
        s.endTimestamp = timestamp
        s.endValue = value
        s.currentPeriod = s.time2period(timestamp)
    }
    s.lastTimestamp = timestamp
    s.lastValue = value
}

func (s *CumulativePeriodsWriter) decrementPeriod() {
    switch s.groupBy {
    case diploma.GroupByHour:
        s.currentPeriod = s.currentPeriod.Add(-1 * time.Hour)
    case diploma.GroupByDay:
        s.currentPeriod = s.currentPeriod.AddDate(0, 0, -1)
    case diploma.GroupByMonth:
        s.currentPeriod = s.currentPeriod.AddDate(0, -1, 0)
    }
}

func (s *CumulativePeriodsWriter) packBlankPeriod() {
    bin.PutUint32(s.arr[0:], uint32(s.currentPeriod.Unix()))
    for i := 4; i < len(s.arr); i++ {
        s.arr[i] = 0
    }
}

func (s *CumulativePeriodsWriter) packPeriod(start uint32, startValue float64) {
    bin.PutUint32(s.arr[0:], uint32(s.currentPeriod.Unix()))
    bin.PutUint32(s.arr[4:], start)
    bin.PutUint32(s.arr[8:], s.endTimestamp)
    bin.PutFloat64(s.arr[12:], startValue)
    bin.PutFloat64(s.arr[20:], s.endValue)
}

func (s *CumulativePeriodsWriter) Close() error {
    if s.endTimestamp > 0 {
        if s.endTimestamp >= s.since {
            if s.lastTimestamp != s.endTimestamp {
                s.packPeriod(s.lastTimestamp, s.lastValue)
            } else {
                s.packPeriod(s.endTimestamp, s.endValue)
            }
            s.responder.AppendRecord(s.arr)
        }
    }
    return s.responder.Flush()
}
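Usage sketch (not part of this commit): the periods writers above are driven by feeding measurements newest-first and then closing, which flushes the last open period and the end-of-stream marker. Measure, WriteDailyInstantPeriods, and the OR-ing of the diploma.Aggregate* flags are assumptions for illustration only.

package transform

import (
    "io"

    "gordenko.dev/dima/diploma"
)

// Measure is a hypothetical sample type used only in this sketch.
type Measure struct {
    Timestamp uint32
    Value     float64
}

// WriteDailyInstantPeriods streams per-day aggregates for measurements that
// must already be ordered newest-first, as feed/decrementPeriod expect.
func WriteDailyInstantPeriods(dst io.Writer, since uint32, measures []Measure) error {
    w, err := NewInstantPeriodsWriter(InstantPeriodsWriterOptions{
        Dst:     dst,
        GroupBy: diploma.GroupByDay,
        Since:   since,
        // Assumption: the Aggregate* constants are bit flags, as the
        // bit-mask checks in NewInstantPeriodsWriter suggest.
        AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
        FirstHourOfDay: 0,
    })
    if err != nil {
        return err
    }
    for _, m := range measures {
        w.Feed(m.Timestamp, m.Value)
    }
    return w.Close()
}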
143 transform/raw.go Normal file
@@ -0,0 +1,143 @@
package transform

import (
    "io"

    "gordenko.dev/dima/diploma/bin"
)

// CURRENT VALUE WRITER

type CurrentValue struct {
    MetricID  uint32
    Timestamp uint32
    Value     float64
}

type CurrentValueWriter struct {
    arr       []byte
    responder *ChunkedResponder
}

func NewCurrentValueWriter(dst io.Writer) *CurrentValueWriter {
    return &CurrentValueWriter{
        arr:       make([]byte, 16),
        responder: NewChunkedResponder(dst),
    }
}

func (s *CurrentValueWriter) BufferValue(m CurrentValue) {
    bin.PutUint32(s.arr[0:], m.MetricID)
    bin.PutUint32(s.arr[4:], m.Timestamp)
    bin.PutFloat64(s.arr[8:], m.Value)
    s.responder.BufferRecord(s.arr)
}

func (s *CurrentValueWriter) Close() error {
    return s.responder.Flush()
}

// INSTANT MEASURE WRITER

type InstantMeasure struct {
    Timestamp uint32
    Value     float64
}

type InstantMeasureWriter struct {
    arr       []byte
    responder *ChunkedResponder
    since     uint32
}

func NewInstantMeasureWriter(dst io.Writer, since uint32) *InstantMeasureWriter {
    return &InstantMeasureWriter{
        arr:       make([]byte, 12),
        responder: NewChunkedResponder(dst),
        since:     since,
    }
}

func (s *InstantMeasureWriter) Feed(timestamp uint32, value float64) {
    s.feed(timestamp, value, false)
}

func (s *InstantMeasureWriter) FeedNoSend(timestamp uint32, value float64) {
    s.feed(timestamp, value, true)
}

func (s *InstantMeasureWriter) feed(timestamp uint32, value float64, isBuffer bool) {
    if timestamp < s.since {
        return
    }
    bin.PutUint32(s.arr[0:], timestamp)
    bin.PutFloat64(s.arr[4:], value)
    if isBuffer {
        s.responder.BufferRecord(s.arr)
    } else {
        s.responder.AppendRecord(s.arr)
    }
}

func (s *InstantMeasureWriter) Close() error {
    return s.responder.Flush()
}

// CUMULATIVE MEASURE WRITER

type CumulativeMeasure struct {
    Timestamp uint32
    Value     float64
    Total     float64
}

type CumulativeMeasureWriter struct {
    arr          []byte
    responder    *ChunkedResponder
    since        uint32
    endTimestamp uint32
    endValue     float64
}

func NewCumulativeMeasureWriter(dst io.Writer, since uint32) *CumulativeMeasureWriter {
    return &CumulativeMeasureWriter{
        arr:       make([]byte, 20),
        responder: NewChunkedResponder(dst),
        since:     since,
    }
}

func (s *CumulativeMeasureWriter) Feed(timestamp uint32, value float64) {
    s.feed(timestamp, value, false)
}

func (s *CumulativeMeasureWriter) FeedNoSend(timestamp uint32, value float64) {
    s.feed(timestamp, value, true)
}

func (s *CumulativeMeasureWriter) feed(timestamp uint32, value float64, isBuffer bool) {
    if s.endTimestamp > 0 {
        s.pack(s.endValue - value)
        if isBuffer {
            s.responder.BufferRecord(s.arr)
        } else {
            s.responder.AppendRecord(s.arr)
        }
    }
    s.endTimestamp = timestamp
    s.endValue = value
}

func (s *CumulativeMeasureWriter) pack(total float64) {
    bin.PutUint32(s.arr[0:], s.endTimestamp)
    bin.PutFloat64(s.arr[4:], s.endValue)
    bin.PutFloat64(s.arr[12:], total)
}

func (s *CumulativeMeasureWriter) Close() error {
    if s.endTimestamp >= s.since {
        s.pack(0)
        s.responder.BufferRecord(s.arr)
    }
    return s.responder.Flush()
}
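A minimal sketch of driving CurrentValueWriter (WriteCurrentValues is a hypothetical helper, not part of the commit); each buffered record is 16 bytes, matching the PutUint32/PutFloat64 offsets above.

package transform

import "io"

// WriteCurrentValues streams the latest value of several metrics in one response.
func WriteCurrentValues(dst io.Writer, values []CurrentValue) error {
    w := NewCurrentValueWriter(dst)
    for _, v := range values {
        w.BufferValue(v) // 16-byte record: metric ID, timestamp, float64 value
    }
    return w.Close() // flushes buffered records, then the end-of-stream byte
}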
89 transform/responder.go Normal file
@@ -0,0 +1,89 @@
package transform

import (
    "bytes"
    "fmt"
    "io"

    "gordenko.dev/dima/diploma/bin"
    "gordenko.dev/dima/diploma/proto"
)

// CHUNKED RESPONDER

var endMsg = []byte{
    proto.RespEndOfValue, // end of stream
}

type ChunkedResponder struct {
    recordsQty int
    buf        *bytes.Buffer
    dst        io.Writer
}

func NewChunkedResponder(dst io.Writer) *ChunkedResponder {
    s := &ChunkedResponder{
        recordsQty: 0,
        buf:        bytes.NewBuffer(nil),
        dst:        dst,
    }

    s.buf.Write([]byte{
        proto.RespPartOfValue, // message type
        0, 0, 0, 0, // records qty
    })
    return s
}

func (s *ChunkedResponder) BufferRecord(rec []byte) {
    s.buf.Write(rec)
    s.recordsQty++
}

func (s *ChunkedResponder) AppendRecord(rec []byte) error {
    s.buf.Write(rec)
    s.recordsQty++

    if s.buf.Len() < 1500 {
        return nil
    }

    if err := s.sendBuffered(); err != nil {
        return err
    }

    s.buf.Write([]byte{
        proto.RespPartOfValue, // message type
        0, 0, 0, 0, // records qty
    })
    s.recordsQty = 0
    return nil
}

func (s *ChunkedResponder) Flush() error {
    if s.recordsQty > 0 {
        if err := s.sendBuffered(); err != nil {
            return err
        }
    }

    if _, err := s.dst.Write(endMsg); err != nil {
        return err
    }
    return nil
}

func (s *ChunkedResponder) sendBuffered() (err error) {
    msg := s.buf.Bytes()
    bin.PutUint32(msg[1:], uint32(s.recordsQty))

    n, err := s.dst.Write(msg)
    if err != nil {
        return
    }
    if n != len(msg) {
        return fmt.Errorf("incomplete write %d bytes instead of %d", n, len(msg))
    }
    s.buf.Reset()
    return
}
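For reference, a client-side reading sketch for this framing. It assumes bin.PutUint32 writes big-endian and that the caller knows the fixed record size of its query; neither is established by this commit, so treat it as illustrative only.

package client

import (
    "encoding/binary"
    "fmt"
    "io"

    "gordenko.dev/dima/diploma/proto"
)

// ReadChunkedResponse consumes RespPartOfValue chunks until RespEndOfValue,
// calling onRecord for every fixed-size record.
func ReadChunkedResponse(r io.Reader, recordSize int, onRecord func([]byte) error) error {
    var hdr [5]byte
    rec := make([]byte, recordSize)
    for {
        if _, err := io.ReadFull(r, hdr[:1]); err != nil { // message type
            return err
        }
        switch hdr[0] {
        case proto.RespEndOfValue:
            return nil // end of stream
        case proto.RespPartOfValue:
            if _, err := io.ReadFull(r, hdr[1:5]); err != nil { // records qty
                return err
            }
            qty := binary.BigEndian.Uint32(hdr[1:5]) // assumption: big-endian
            for i := uint32(0); i < qty; i++ {
                if _, err := io.ReadFull(r, rec); err != nil {
                    return err
                }
                if err := onRecord(rec); err != nil {
                    return err
                }
            }
        default:
            return fmt.Errorf("unexpected message type %d", hdr[0])
        }
    }
}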
19 transform/transform.go Normal file
@@ -0,0 +1,19 @@
package transform

import (
    "time"

    "gordenko.dev/dima/diploma/timeutil"
)

func groupByHour(timestamp uint32) time.Time {
    return timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "h")
}

func groupByDay(timestamp uint32) time.Time {
    return timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "d")
}

func groupByMonth(timestamp uint32) time.Time {
    return timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
}
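A small property sketch for these helpers, assuming timeutil.FirstSecondInPeriod truncates a time to the start of its period (so grouping a period's own start is a no-op); periodsAreIdempotent is a hypothetical check, not part of the commit.

package transform

import "time"

// periodsAreIdempotent reports whether re-grouping a period start yields the same period.
func periodsAreIdempotent(ts uint32) bool {
    for _, group := range []func(uint32) time.Time{groupByHour, groupByDay, groupByMonth} {
        p := group(ts)
        if !group(uint32(p.Unix())).Equal(p) {
            return false
        }
    }
    return true
}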