1st
This commit is contained in:
@@ -1,325 +0,0 @@
|
||||
package atree
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"gordenko.dev/dima/diploma"
|
||||
"gordenko.dev/dima/diploma/timeutil"
|
||||
)
|
||||
|
||||
// AGGREGATE
|
||||
|
||||
type InstantAggregator struct {
|
||||
firstHourOfDay int
|
||||
lastDayOfMonth int
|
||||
time2period func(uint32) uint32
|
||||
currentPeriod uint32
|
||||
since uint32
|
||||
until uint32
|
||||
min float64
|
||||
max float64
|
||||
total float64
|
||||
entries int
|
||||
}
|
||||
|
||||
type InstantAggregatorOptions struct {
|
||||
GroupBy diploma.GroupBy
|
||||
FirstHourOfDay int
|
||||
LastDayOfMonth int
|
||||
}
|
||||
|
||||
func NewInstantAggregator(opt InstantAggregatorOptions) (*InstantAggregator, error) {
|
||||
s := &InstantAggregator{
|
||||
firstHourOfDay: opt.FirstHourOfDay,
|
||||
lastDayOfMonth: opt.LastDayOfMonth,
|
||||
}
|
||||
|
||||
switch opt.GroupBy {
|
||||
case diploma.GroupByHour:
|
||||
s.time2period = groupByHour
|
||||
|
||||
case diploma.GroupByDay:
|
||||
if s.firstHourOfDay > 0 {
|
||||
s.time2period = s.groupByDayUsingFHD
|
||||
} else {
|
||||
s.time2period = groupByDay
|
||||
}
|
||||
|
||||
case diploma.GroupByMonth:
|
||||
if s.firstHourOfDay > 0 {
|
||||
if s.lastDayOfMonth > 0 {
|
||||
s.time2period = s.groupByMonthUsingFHDAndLDM
|
||||
} else {
|
||||
s.time2period = s.groupByMonthUsingFHD
|
||||
}
|
||||
} else {
|
||||
if s.lastDayOfMonth > 0 {
|
||||
s.time2period = s.groupByMonthUsingLDM
|
||||
} else {
|
||||
s.time2period = groupByMonth
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown groupBy %d option", opt.GroupBy)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Приходят данные от свежих к старым, тоесть сперва получаю Until.
|
||||
// return period complete flag
|
||||
func (s *InstantAggregator) Feed(timestamp uint32, value float64, p *InstantPeriod) bool {
|
||||
period := s.time2period(timestamp)
|
||||
//fmt.Printf("feed: %s %v, period: %s\n", time.Unix(int64(timestamp), 0), value, time.Unix(int64(period), 0))
|
||||
if s.entries == 0 {
|
||||
s.currentPeriod = period
|
||||
s.since = timestamp
|
||||
s.until = timestamp
|
||||
s.min = value
|
||||
s.max = value
|
||||
s.total = value
|
||||
s.entries = 1
|
||||
return false
|
||||
}
|
||||
|
||||
if period != s.currentPeriod {
|
||||
// готовый период
|
||||
s.FillPeriod(timestamp, p)
|
||||
s.currentPeriod = period
|
||||
s.since = timestamp
|
||||
s.until = timestamp
|
||||
s.min = value
|
||||
s.max = value
|
||||
s.total = value
|
||||
s.entries = 1
|
||||
return true
|
||||
}
|
||||
|
||||
if value < s.min {
|
||||
s.min = value
|
||||
} else if value > s.max {
|
||||
s.max = value
|
||||
}
|
||||
// для подсчета AVG
|
||||
s.total += value
|
||||
s.entries++
|
||||
// начало периода
|
||||
s.since = timestamp
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *InstantAggregator) FillPeriod(prevTimestamp uint32, p *InstantPeriod) bool {
|
||||
if s.entries == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
//fmt.Printf("FillPeriod: %s, prevTimestamp: %s\n", time.Unix(int64(s.currentPeriod), 0), time.Unix(int64(prevTimestamp), 0))
|
||||
p.Period = s.currentPeriod
|
||||
if prevTimestamp > 0 {
|
||||
p.Since = prevTimestamp
|
||||
} else {
|
||||
p.Since = s.since
|
||||
}
|
||||
p.Until = s.until
|
||||
p.Min = s.min
|
||||
p.Max = s.max
|
||||
p.Avg = s.total / float64(s.entries)
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *InstantAggregator) groupByDayUsingFHD(timestamp uint32) uint32 {
|
||||
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "d")
|
||||
if tm.Hour() < s.firstHourOfDay {
|
||||
tm = tm.AddDate(0, 0, -1)
|
||||
}
|
||||
return uint32(tm.Unix())
|
||||
}
|
||||
|
||||
func (s *InstantAggregator) groupByMonthUsingFHD(timestamp uint32) uint32 {
|
||||
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
|
||||
if tm.Hour() < s.firstHourOfDay {
|
||||
tm = tm.AddDate(0, 0, -1)
|
||||
}
|
||||
return uint32(tm.Unix())
|
||||
}
|
||||
|
||||
func (s *InstantAggregator) groupByMonthUsingLDM(timestamp uint32) uint32 {
|
||||
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
|
||||
if tm.Day() > s.lastDayOfMonth {
|
||||
tm = tm.AddDate(0, 1, 0)
|
||||
}
|
||||
return uint32(tm.Unix())
|
||||
}
|
||||
|
||||
func (s *InstantAggregator) groupByMonthUsingFHDAndLDM(timestamp uint32) uint32 {
|
||||
// ВАЖНО!
|
||||
// Сперва проверяю время.
|
||||
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
|
||||
if tm.Hour() < s.firstHourOfDay {
|
||||
tm = tm.AddDate(0, 0, -1)
|
||||
}
|
||||
if tm.Day() > s.lastDayOfMonth {
|
||||
tm = tm.AddDate(0, 1, 0)
|
||||
}
|
||||
return uint32(tm.Unix())
|
||||
}
|
||||
|
||||
// CUMULATIVE
|
||||
|
||||
type CumulativeAggregator struct {
|
||||
firstHourOfDay int
|
||||
lastDayOfMonth int
|
||||
time2period func(uint32) uint32
|
||||
currentPeriod uint32
|
||||
since uint32
|
||||
until uint32
|
||||
sinceValue float64
|
||||
untilValue float64
|
||||
entries int
|
||||
}
|
||||
|
||||
type CumulativeAggregatorOptions struct {
|
||||
GroupBy diploma.GroupBy
|
||||
FirstHourOfDay int
|
||||
LastDayOfMonth int
|
||||
}
|
||||
|
||||
func NewCumulativeAggregator(opt CumulativeAggregatorOptions) (*CumulativeAggregator, error) {
|
||||
s := &CumulativeAggregator{
|
||||
firstHourOfDay: opt.FirstHourOfDay,
|
||||
lastDayOfMonth: opt.LastDayOfMonth,
|
||||
}
|
||||
|
||||
switch opt.GroupBy {
|
||||
case diploma.GroupByHour:
|
||||
s.time2period = groupByHour
|
||||
|
||||
case diploma.GroupByDay:
|
||||
if s.firstHourOfDay > 0 {
|
||||
s.time2period = s.groupByDayUsingFHD
|
||||
} else {
|
||||
s.time2period = groupByDay
|
||||
}
|
||||
|
||||
case diploma.GroupByMonth:
|
||||
if s.firstHourOfDay > 0 {
|
||||
if s.lastDayOfMonth > 0 {
|
||||
s.time2period = s.groupByMonthUsingFHDAndLDM
|
||||
} else {
|
||||
s.time2period = s.groupByMonthUsingFHD
|
||||
}
|
||||
} else {
|
||||
if s.lastDayOfMonth > 0 {
|
||||
s.time2period = s.groupByMonthUsingLDM
|
||||
} else {
|
||||
s.time2period = groupByMonth
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown groupBy %d option", opt.GroupBy)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// return period complete flag
|
||||
func (s *CumulativeAggregator) Feed(timestamp uint32, value float64, p *CumulativePeriod) bool {
|
||||
period := s.time2period(timestamp)
|
||||
if s.entries == 0 {
|
||||
s.currentPeriod = period
|
||||
s.since = timestamp
|
||||
s.until = timestamp
|
||||
s.sinceValue = value
|
||||
s.untilValue = value
|
||||
s.entries = 1
|
||||
return false
|
||||
}
|
||||
|
||||
if period != s.currentPeriod {
|
||||
// готовый период
|
||||
s.FillPeriod(timestamp, value, p)
|
||||
s.currentPeriod = period
|
||||
s.since = timestamp
|
||||
s.until = timestamp
|
||||
s.sinceValue = value
|
||||
s.untilValue = value
|
||||
s.entries = 1
|
||||
return true
|
||||
}
|
||||
|
||||
// начало периода
|
||||
s.since = timestamp
|
||||
s.sinceValue = value
|
||||
s.entries++
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *CumulativeAggregator) FillPeriod(prevTimestamp uint32, value float64, p *CumulativePeriod) bool {
|
||||
if s.entries == 0 {
|
||||
return false
|
||||
}
|
||||
p.Period = s.currentPeriod
|
||||
if prevTimestamp > 0 {
|
||||
p.Since = prevTimestamp
|
||||
p.Total = s.untilValue - value
|
||||
} else {
|
||||
p.Since = s.since
|
||||
p.Total = s.untilValue - s.sinceValue
|
||||
}
|
||||
p.Until = s.until
|
||||
p.EndValue = s.untilValue
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *CumulativeAggregator) groupByDayUsingFHD(timestamp uint32) uint32 {
|
||||
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "d")
|
||||
if tm.Hour() < s.firstHourOfDay {
|
||||
tm = tm.AddDate(0, 0, -1)
|
||||
}
|
||||
return uint32(tm.Unix())
|
||||
}
|
||||
|
||||
func (s *CumulativeAggregator) groupByMonthUsingFHD(timestamp uint32) uint32 {
|
||||
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
|
||||
if tm.Hour() < s.firstHourOfDay {
|
||||
tm = tm.AddDate(0, 0, -1)
|
||||
}
|
||||
return uint32(tm.Unix())
|
||||
}
|
||||
|
||||
func (s *CumulativeAggregator) groupByMonthUsingLDM(timestamp uint32) uint32 {
|
||||
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
|
||||
if tm.Day() > s.lastDayOfMonth {
|
||||
tm = tm.AddDate(0, 1, 0)
|
||||
}
|
||||
return uint32(tm.Unix())
|
||||
}
|
||||
|
||||
func (s *CumulativeAggregator) groupByMonthUsingFHDAndLDM(timestamp uint32) uint32 {
|
||||
// ВАЖНО!
|
||||
// Сперва проверяю время.
|
||||
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
|
||||
if tm.Hour() < s.firstHourOfDay {
|
||||
tm = tm.AddDate(0, 0, -1)
|
||||
}
|
||||
if tm.Day() > s.lastDayOfMonth {
|
||||
tm = tm.AddDate(0, 1, 0)
|
||||
}
|
||||
return uint32(tm.Unix())
|
||||
}
|
||||
|
||||
func groupByHour(timestamp uint32) uint32 {
|
||||
return uint32(timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "h").Unix())
|
||||
}
|
||||
|
||||
func groupByDay(timestamp uint32) uint32 {
|
||||
return uint32(timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "d").Unix())
|
||||
}
|
||||
|
||||
func groupByMonth(timestamp uint32) uint32 {
|
||||
return uint32(timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m").Unix())
|
||||
}
|
||||
16
atree/io.go
16
atree/io.go
@@ -8,7 +8,7 @@ import (
|
||||
"math"
|
||||
"os"
|
||||
|
||||
octopus "gordenko.dev/dima/diploma"
|
||||
diploma "gordenko.dev/dima/diploma"
|
||||
"gordenko.dev/dima/diploma/atree/redo"
|
||||
"gordenko.dev/dima/diploma/bin"
|
||||
)
|
||||
@@ -74,8 +74,8 @@ func (s *Atree) releaseIndexPage(pageNo uint32) {
|
||||
p.ReferenceCount--
|
||||
return
|
||||
} else {
|
||||
octopus.Abort(
|
||||
octopus.ReferenceCountBug,
|
||||
diploma.Abort(
|
||||
diploma.ReferenceCountBug,
|
||||
fmt.Errorf("call releaseIndexPage on page %d with reference count = %d",
|
||||
pageNo, p.ReferenceCount),
|
||||
)
|
||||
@@ -98,7 +98,7 @@ func (s *Atree) allocIndexPage() AllocatedPage {
|
||||
} else {
|
||||
s.mutex.Lock()
|
||||
if s.allocatedIndexPagesQty == math.MaxUint32 {
|
||||
octopus.Abort(octopus.MaxAtreeSizeExceeded,
|
||||
diploma.Abort(diploma.MaxAtreeSizeExceeded,
|
||||
errors.New("no space in Atree index"))
|
||||
}
|
||||
s.allocatedIndexPagesQty++
|
||||
@@ -163,8 +163,8 @@ func (s *Atree) releaseDataPage(pageNo uint32) {
|
||||
p.ReferenceCount--
|
||||
return
|
||||
} else {
|
||||
octopus.Abort(
|
||||
octopus.ReferenceCountBug,
|
||||
diploma.Abort(
|
||||
diploma.ReferenceCountBug,
|
||||
fmt.Errorf("call releaseDataPage on page %d with reference count = %d",
|
||||
pageNo, p.ReferenceCount),
|
||||
)
|
||||
@@ -186,7 +186,7 @@ func (s *Atree) allocDataPage() AllocatedPage {
|
||||
} else {
|
||||
s.mutex.Lock()
|
||||
if s.allocatedDataPagesQty == math.MaxUint32 {
|
||||
octopus.Abort(octopus.MaxAtreeSizeExceeded,
|
||||
diploma.Abort(diploma.MaxAtreeSizeExceeded,
|
||||
errors.New("no space in Atree index"))
|
||||
}
|
||||
s.allocatedDataPagesQty++
|
||||
@@ -303,7 +303,7 @@ func (s *Atree) pageWriter() {
|
||||
case <-s.writeSignalCh:
|
||||
err := s.writeTasks()
|
||||
if err != nil {
|
||||
octopus.Abort(octopus.WriteToAtreeFailed, err)
|
||||
diploma.Abort(diploma.WriteToAtreeFailed, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
660
atree/select.go
660
atree/select.go
@@ -3,81 +3,16 @@ package atree
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
octopus "gordenko.dev/dima/diploma"
|
||||
"gordenko.dev/dima/diploma"
|
||||
)
|
||||
|
||||
type IterateAllCumulativeByTreeCursorReq struct {
|
||||
type ContinueFullScanReq struct {
|
||||
FracDigits byte
|
||||
PageNo uint32
|
||||
EndTimestamp uint32
|
||||
EndValue float64
|
||||
ResponseWriter *CumulativeMeasureWriter
|
||||
}
|
||||
|
||||
func (s *Atree) IterateAllCumulativeByTreeCursor(req IterateAllCumulativeByTreeCursorReq) error {
|
||||
buf, err := s.fetchDataPage(req.PageNo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
|
||||
PageNo: req.PageNo,
|
||||
PageData: buf,
|
||||
Atree: s,
|
||||
FracDigits: req.FracDigits,
|
||||
MetricType: octopus.Cumulative,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer treeCursor.Close()
|
||||
|
||||
var (
|
||||
endTimestamp = req.EndTimestamp
|
||||
endValue = req.EndValue
|
||||
)
|
||||
|
||||
for {
|
||||
timestamp, value, done, err := treeCursor.Prev()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if done {
|
||||
err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
|
||||
Timestamp: endTimestamp,
|
||||
Value: endValue,
|
||||
Total: endValue,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err = req.ResponseWriter.WriteMeasure(CumulativeMeasure{
|
||||
Timestamp: endTimestamp,
|
||||
Value: endValue,
|
||||
Total: endValue - value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
endTimestamp = timestamp
|
||||
endValue = value
|
||||
}
|
||||
}
|
||||
|
||||
type ContinueIterateCumulativeByTreeCursorReq struct {
|
||||
FracDigits byte
|
||||
Since uint32
|
||||
Until uint32
|
||||
ResponseWriter AtreeMeasureConsumer
|
||||
LastPageNo uint32
|
||||
EndTimestamp uint32
|
||||
EndValue float64
|
||||
ResponseWriter *CumulativeMeasureWriter
|
||||
}
|
||||
|
||||
func (s *Atree) ContinueIterateCumulativeByTreeCursor(req ContinueIterateCumulativeByTreeCursorReq) error {
|
||||
func (s *Atree) ContinueFullScan(req ContinueFullScanReq) error {
|
||||
buf, err := s.fetchDataPage(req.LastPageNo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
|
||||
@@ -88,532 +23,107 @@ func (s *Atree) ContinueIterateCumulativeByTreeCursor(req ContinueIterateCumulat
|
||||
PageData: buf,
|
||||
Atree: s,
|
||||
FracDigits: req.FracDigits,
|
||||
MetricType: octopus.Cumulative,
|
||||
MetricType: diploma.Instant,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer treeCursor.Close()
|
||||
|
||||
var (
|
||||
endTimestamp = req.EndTimestamp
|
||||
endValue = req.EndValue
|
||||
)
|
||||
for {
|
||||
timestamp, value, done, err := treeCursor.Prev()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if done {
|
||||
return nil
|
||||
}
|
||||
req.ResponseWriter.Feed(timestamp, value)
|
||||
}
|
||||
}
|
||||
|
||||
type ContinueRangeScanReq struct {
|
||||
FracDigits byte
|
||||
ResponseWriter AtreeMeasureConsumer
|
||||
LastPageNo uint32
|
||||
Since uint32
|
||||
}
|
||||
|
||||
func (s *Atree) ContinueRangeScan(req ContinueRangeScanReq) error {
|
||||
buf, err := s.fetchDataPage(req.LastPageNo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
|
||||
}
|
||||
|
||||
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
|
||||
PageNo: req.LastPageNo,
|
||||
PageData: buf,
|
||||
Atree: s,
|
||||
FracDigits: req.FracDigits,
|
||||
MetricType: diploma.Instant,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer treeCursor.Close()
|
||||
|
||||
for {
|
||||
timestamp, value, done, err := treeCursor.Prev()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if done {
|
||||
err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
|
||||
Timestamp: endTimestamp,
|
||||
Value: endValue,
|
||||
Total: endValue,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
req.ResponseWriter.Feed(timestamp, value)
|
||||
if timestamp < req.Since {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type RangeScanReq struct {
|
||||
FracDigits byte
|
||||
ResponseWriter AtreeMeasureConsumer
|
||||
RootPageNo uint32
|
||||
Since uint32
|
||||
Until uint32
|
||||
}
|
||||
|
||||
func (s *Atree) RangeScan(req RangeScanReq) error {
|
||||
pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cursor, err := NewBackwardCursor(BackwardCursorOptions{
|
||||
PageNo: pageNo,
|
||||
PageData: buf,
|
||||
Atree: s,
|
||||
FracDigits: req.FracDigits,
|
||||
MetricType: diploma.Instant,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
for {
|
||||
timestamp, value, done, err := cursor.Prev()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if done {
|
||||
return nil
|
||||
}
|
||||
|
||||
if timestamp <= req.Until {
|
||||
err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
|
||||
Timestamp: endTimestamp,
|
||||
Value: endValue,
|
||||
Total: endValue - value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.ResponseWriter.Feed(timestamp, value)
|
||||
|
||||
if timestamp < req.Since {
|
||||
// - записи, удовлетворяющие временным рамкам, закончились.
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
// bug panic
|
||||
panic("continue cumulative but timestamp > req.Until")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type FindAndIterateCumulativeByTreeCursorReq struct {
|
||||
FracDigits byte
|
||||
Since uint32
|
||||
Until uint32
|
||||
RootPageNo uint32
|
||||
ResponseWriter *CumulativeMeasureWriter
|
||||
}
|
||||
|
||||
func (s *Atree) FindAndIterateCumulativeByTreeCursor(req FindAndIterateCumulativeByTreeCursorReq) error {
|
||||
pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
|
||||
PageNo: pageNo,
|
||||
PageData: buf,
|
||||
Atree: s,
|
||||
FracDigits: req.FracDigits,
|
||||
MetricType: octopus.Cumulative,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer treeCursor.Close()
|
||||
|
||||
var (
|
||||
endTimestamp uint32
|
||||
endValue float64
|
||||
)
|
||||
|
||||
for {
|
||||
timestamp, value, done, err := treeCursor.Prev()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if done {
|
||||
if endTimestamp > 0 {
|
||||
err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
|
||||
Timestamp: endTimestamp,
|
||||
Value: endValue,
|
||||
Total: endValue,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if timestamp > req.Until {
|
||||
continue
|
||||
}
|
||||
|
||||
if endTimestamp > 0 {
|
||||
err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
|
||||
Timestamp: endTimestamp,
|
||||
Value: endValue,
|
||||
Total: endValue - value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
endTimestamp = timestamp
|
||||
endValue = value
|
||||
|
||||
if timestamp < req.Since {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type IterateAllInstantByTreeCursorReq struct {
|
||||
FracDigits byte
|
||||
PageNo uint32
|
||||
ResponseWriter *InstantMeasureWriter
|
||||
}
|
||||
|
||||
func (s *Atree) IterateAllInstantByTreeCursor(req IterateAllInstantByTreeCursorReq) error {
|
||||
buf, err := s.fetchDataPage(req.PageNo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
|
||||
PageNo: req.PageNo,
|
||||
PageData: buf,
|
||||
Atree: s,
|
||||
FracDigits: req.FracDigits,
|
||||
MetricType: octopus.Instant,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer treeCursor.Close()
|
||||
|
||||
for {
|
||||
timestamp, value, done, err := treeCursor.Prev()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if done {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = req.ResponseWriter.WriteMeasure(InstantMeasure{
|
||||
Timestamp: timestamp,
|
||||
Value: value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type ContinueIterateInstantByTreeCursorReq struct {
|
||||
FracDigits byte
|
||||
Since uint32
|
||||
Until uint32
|
||||
LastPageNo uint32
|
||||
ResponseWriter *InstantMeasureWriter
|
||||
}
|
||||
|
||||
func (s *Atree) ContinueIterateInstantByTreeCursor(req ContinueIterateInstantByTreeCursorReq) error {
|
||||
buf, err := s.fetchDataPage(req.LastPageNo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
|
||||
}
|
||||
|
||||
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
|
||||
PageNo: req.LastPageNo,
|
||||
PageData: buf,
|
||||
Atree: s,
|
||||
FracDigits: req.FracDigits,
|
||||
MetricType: octopus.Instant,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer treeCursor.Close()
|
||||
|
||||
for {
|
||||
timestamp, value, done, err := treeCursor.Prev()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if done {
|
||||
// - записи закончились;
|
||||
return nil
|
||||
}
|
||||
|
||||
if timestamp > req.Until {
|
||||
panic("continue instant timestamp > req.Until")
|
||||
}
|
||||
|
||||
if timestamp < req.Since {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = req.ResponseWriter.WriteMeasure(InstantMeasure{
|
||||
Timestamp: timestamp,
|
||||
Value: value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type FindAndIterateInstantByTreeCursorReq struct {
|
||||
FracDigits byte
|
||||
Since uint32
|
||||
Until uint32
|
||||
RootPageNo uint32
|
||||
ResponseWriter *InstantMeasureWriter
|
||||
}
|
||||
|
||||
func (s *Atree) FindAndIterateInstantByTreeCursor(req FindAndIterateInstantByTreeCursorReq) error {
|
||||
pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
|
||||
PageNo: pageNo,
|
||||
PageData: buf,
|
||||
Atree: s,
|
||||
FracDigits: req.FracDigits,
|
||||
MetricType: octopus.Instant,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer treeCursor.Close()
|
||||
|
||||
for {
|
||||
timestamp, value, done, err := treeCursor.Prev()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if done {
|
||||
return nil
|
||||
}
|
||||
|
||||
if timestamp > req.Until {
|
||||
continue
|
||||
}
|
||||
|
||||
if timestamp < req.Since {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = req.ResponseWriter.WriteMeasure(InstantMeasure{
|
||||
Timestamp: timestamp,
|
||||
Value: value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type ContinueCollectInstantPeriodsReq struct {
|
||||
FracDigits byte
|
||||
Aggregator *InstantAggregator
|
||||
ResponseWriter *InstantPeriodsWriter
|
||||
LastPageNo uint32
|
||||
Since uint32
|
||||
Until uint32
|
||||
}
|
||||
|
||||
func (s *Atree) ContinueCollectInstantPeriods(req ContinueCollectInstantPeriodsReq) error {
|
||||
buf, err := s.fetchDataPage(req.LastPageNo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
|
||||
}
|
||||
|
||||
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
|
||||
PageNo: req.LastPageNo,
|
||||
PageData: buf,
|
||||
Atree: s,
|
||||
FracDigits: req.FracDigits,
|
||||
MetricType: octopus.Instant,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer treeCursor.Close()
|
||||
|
||||
var period InstantPeriod
|
||||
|
||||
for {
|
||||
timestamp, value, done, err := treeCursor.Prev()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if done || timestamp < req.Since {
|
||||
isCompleted := req.Aggregator.FillPeriod(timestamp, &period)
|
||||
if isCompleted {
|
||||
err := req.ResponseWriter.WritePeriod(period)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if timestamp <= req.Until {
|
||||
isCompleted := req.Aggregator.Feed(timestamp, value, &period)
|
||||
if isCompleted {
|
||||
err := req.ResponseWriter.WritePeriod(period)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type FindInstantPeriodsReq struct {
|
||||
FracDigits byte
|
||||
ResponseWriter *InstantPeriodsWriter
|
||||
RootPageNo uint32
|
||||
Since uint32
|
||||
Until uint32
|
||||
GroupBy octopus.GroupBy
|
||||
FirstHourOfDay int
|
||||
LastDayOfMonth int
|
||||
}
|
||||
|
||||
func (s *Atree) FindInstantPeriods(req FindInstantPeriodsReq) error {
|
||||
pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
aggregator, err := NewInstantAggregator(InstantAggregatorOptions{
|
||||
GroupBy: req.GroupBy,
|
||||
FirstHourOfDay: req.FirstHourOfDay,
|
||||
LastDayOfMonth: req.LastDayOfMonth,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cursor, err := NewBackwardCursor(BackwardCursorOptions{
|
||||
PageNo: pageNo,
|
||||
PageData: buf,
|
||||
Atree: s,
|
||||
FracDigits: req.FracDigits,
|
||||
MetricType: octopus.Instant,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
var period InstantPeriod
|
||||
|
||||
for {
|
||||
timestamp, value, done, err := cursor.Prev()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if done || timestamp < req.Since {
|
||||
isCompleted := aggregator.FillPeriod(timestamp, &period)
|
||||
if isCompleted {
|
||||
err := req.ResponseWriter.WritePeriod(period)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if timestamp <= req.Until {
|
||||
isCompleted := aggregator.Feed(timestamp, value, &period)
|
||||
if isCompleted {
|
||||
err := req.ResponseWriter.WritePeriod(period)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type FindCumulativePeriodsReq struct {
|
||||
FracDigits byte
|
||||
ResponseWriter *CumulativePeriodsWriter
|
||||
RootPageNo uint32
|
||||
Since uint32
|
||||
Until uint32
|
||||
GroupBy octopus.GroupBy
|
||||
FirstHourOfDay int
|
||||
LastDayOfMonth int
|
||||
}
|
||||
|
||||
func (s *Atree) FindCumulativePeriods(req FindCumulativePeriodsReq) error {
|
||||
pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
aggregator, err := NewCumulativeAggregator(CumulativeAggregatorOptions{
|
||||
GroupBy: req.GroupBy,
|
||||
FirstHourOfDay: req.FirstHourOfDay,
|
||||
LastDayOfMonth: req.LastDayOfMonth,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cursor, err := NewBackwardCursor(BackwardCursorOptions{
|
||||
PageNo: pageNo,
|
||||
PageData: buf,
|
||||
Atree: s,
|
||||
FracDigits: req.FracDigits,
|
||||
MetricType: octopus.Cumulative,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
var period CumulativePeriod
|
||||
|
||||
for {
|
||||
timestamp, value, done, err := cursor.Prev()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if done || timestamp < req.Since {
|
||||
isCompleted := aggregator.FillPeriod(timestamp, value, &period)
|
||||
if isCompleted {
|
||||
err := req.ResponseWriter.WritePeriod(period)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if timestamp <= req.Until {
|
||||
isCompleted := aggregator.Feed(timestamp, value, &period)
|
||||
if isCompleted {
|
||||
err := req.ResponseWriter.WritePeriod(period)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type ContinueCollectCumulativePeriodsReq struct {
|
||||
FracDigits byte
|
||||
Aggregator *CumulativeAggregator
|
||||
ResponseWriter *CumulativePeriodsWriter
|
||||
LastPageNo uint32
|
||||
Since uint32
|
||||
Until uint32
|
||||
}
|
||||
|
||||
func (s *Atree) ContinueCollectCumulativePeriods(req ContinueCollectCumulativePeriodsReq) error {
|
||||
buf, err := s.fetchDataPage(req.LastPageNo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
|
||||
}
|
||||
|
||||
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
|
||||
PageNo: req.LastPageNo,
|
||||
PageData: buf,
|
||||
Atree: s,
|
||||
FracDigits: req.FracDigits,
|
||||
MetricType: octopus.Cumulative,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer treeCursor.Close()
|
||||
|
||||
var period CumulativePeriod
|
||||
|
||||
for {
|
||||
timestamp, value, done, err := treeCursor.Prev()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if done || timestamp < req.Since {
|
||||
isCompleted := req.Aggregator.FillPeriod(timestamp, value, &period)
|
||||
if isCompleted {
|
||||
err := req.ResponseWriter.WritePeriod(period)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if timestamp <= req.Until {
|
||||
isCompleted := req.Aggregator.Feed(timestamp, value, &period)
|
||||
if isCompleted {
|
||||
err := req.ResponseWriter.WritePeriod(period)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
307
atree/writers.go
307
atree/writers.go
@@ -1,306 +1,15 @@
|
||||
package atree
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
octopus "gordenko.dev/dima/diploma"
|
||||
"gordenko.dev/dima/diploma/bin"
|
||||
"gordenko.dev/dima/diploma/proto"
|
||||
)
|
||||
|
||||
// CURRENT VALUE WRITER
|
||||
|
||||
type CurrentValue struct {
|
||||
MetricID uint32
|
||||
Timestamp uint32
|
||||
Value float64
|
||||
type PeriodsWriter interface {
|
||||
Feed(uint32, float64)
|
||||
FeedNoSend(uint32, float64)
|
||||
Close() error
|
||||
}
|
||||
|
||||
type CurrentValueWriter struct {
|
||||
arr []byte
|
||||
responder *ChunkedResponder
|
||||
type WorkerMeasureConsumer interface {
|
||||
FeedNoSend(uint32, float64)
|
||||
}
|
||||
|
||||
func NewCurrentValueWriter(dst io.Writer) *CurrentValueWriter {
|
||||
return &CurrentValueWriter{
|
||||
arr: make([]byte, 16),
|
||||
responder: NewChunkedResponder(dst),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *CurrentValueWriter) BufferValue(m CurrentValue) {
|
||||
bin.PutUint32(s.arr[0:], m.MetricID)
|
||||
bin.PutUint32(s.arr[4:], m.Timestamp)
|
||||
bin.PutFloat64(s.arr[8:], m.Value)
|
||||
s.responder.BufferRecord(s.arr)
|
||||
}
|
||||
|
||||
func (s *CurrentValueWriter) Close() error {
|
||||
return s.responder.Flush()
|
||||
}
|
||||
|
||||
// INSTANT MEASURE WRITER
|
||||
|
||||
type InstantMeasure struct {
|
||||
Timestamp uint32
|
||||
Value float64
|
||||
}
|
||||
|
||||
type InstantMeasureWriter struct {
|
||||
arr []byte
|
||||
responder *ChunkedResponder
|
||||
}
|
||||
|
||||
func NewInstantMeasureWriter(dst io.Writer) *InstantMeasureWriter {
|
||||
return &InstantMeasureWriter{
|
||||
arr: make([]byte, 12),
|
||||
responder: NewChunkedResponder(dst),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *InstantMeasureWriter) BufferMeasure(m InstantMeasure) {
|
||||
bin.PutUint32(s.arr[0:], m.Timestamp)
|
||||
bin.PutFloat64(s.arr[4:], m.Value)
|
||||
s.responder.BufferRecord(s.arr)
|
||||
}
|
||||
|
||||
func (s *InstantMeasureWriter) WriteMeasure(m InstantMeasure) error {
|
||||
bin.PutUint32(s.arr[0:], m.Timestamp)
|
||||
bin.PutFloat64(s.arr[4:], m.Value)
|
||||
return s.responder.AppendRecord(s.arr)
|
||||
}
|
||||
|
||||
func (s *InstantMeasureWriter) Close() error {
|
||||
return s.responder.Flush()
|
||||
}
|
||||
|
||||
// CUMULATIVE MEASURE WRITER
|
||||
|
||||
type CumulativeMeasure struct {
|
||||
Timestamp uint32
|
||||
Value float64
|
||||
Total float64
|
||||
}
|
||||
|
||||
type CumulativeMeasureWriter struct {
|
||||
arr []byte
|
||||
responder *ChunkedResponder
|
||||
}
|
||||
|
||||
func NewCumulativeMeasureWriter(dst io.Writer) *CumulativeMeasureWriter {
|
||||
return &CumulativeMeasureWriter{
|
||||
arr: make([]byte, 20),
|
||||
responder: NewChunkedResponder(dst),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *CumulativeMeasureWriter) BufferMeasure(m CumulativeMeasure) {
|
||||
bin.PutUint32(s.arr[0:], m.Timestamp)
|
||||
bin.PutFloat64(s.arr[4:], m.Value)
|
||||
bin.PutFloat64(s.arr[12:], m.Total)
|
||||
s.responder.BufferRecord(s.arr)
|
||||
}
|
||||
|
||||
func (s *CumulativeMeasureWriter) WriteMeasure(m CumulativeMeasure) error {
|
||||
bin.PutUint32(s.arr[0:], m.Timestamp)
|
||||
bin.PutFloat64(s.arr[4:], m.Value)
|
||||
bin.PutFloat64(s.arr[12:], m.Total)
|
||||
return s.responder.AppendRecord(s.arr)
|
||||
}
|
||||
|
||||
func (s *CumulativeMeasureWriter) Close() error {
|
||||
return s.responder.Flush()
|
||||
}
|
||||
|
||||
// INSTANT AGGREGATE WRITER
|
||||
|
||||
// InstantPeriodsWriter serializes aggregated InstantPeriod records. The
// record layout is variable: a 12-byte prefix plus 8 bytes for each
// aggregate function (min/max/avg) enabled in aggregateFuncs.
type InstantPeriodsWriter struct {
	aggregateFuncs byte              // bitmask of octopus.AggregateMin/Max/Avg selecting which values are packed
	arr            []byte            // reusable record buffer sized at construction to match the bitmask
	responder      *ChunkedResponder // chunked output to the destination writer
}
|
||||
|
||||
func NewInstantPeriodsWriter(dst io.Writer, aggregateFuncs byte) *InstantPeriodsWriter {
|
||||
var q int
|
||||
if (aggregateFuncs & octopus.AggregateMin) == octopus.AggregateMin {
|
||||
q++
|
||||
}
|
||||
if (aggregateFuncs & octopus.AggregateMax) == octopus.AggregateMax {
|
||||
q++
|
||||
}
|
||||
if (aggregateFuncs & octopus.AggregateAvg) == octopus.AggregateAvg {
|
||||
q++
|
||||
}
|
||||
return &InstantPeriodsWriter{
|
||||
aggregateFuncs: aggregateFuncs,
|
||||
arr: make([]byte, 12+q*8),
|
||||
responder: NewChunkedResponder(dst),
|
||||
}
|
||||
}
|
||||
|
||||
// InstantPeriod is one aggregated bucket of instant measurements: the
// bucket identifier, the time range actually covered, and the aggregate
// values computed over that range.
type InstantPeriod struct {
	Period uint32  // bucket key (packed as 4 bytes; presumably a period-start timestamp — TODO confirm)
	Since  uint32  // first timestamp that contributed to this bucket
	Until  uint32  // last timestamp that contributed to this bucket
	Min    float64 // only serialized when AggregateMin is enabled
	Max    float64 // only serialized when AggregateMax is enabled
	Avg    float64 // only serialized when AggregateAvg is enabled
}
|
||||
|
||||
func (s *InstantPeriodsWriter) BufferMeasure(p InstantPeriod) {
|
||||
s.pack(p)
|
||||
s.responder.BufferRecord(s.arr)
|
||||
}
|
||||
|
||||
func (s *InstantPeriodsWriter) WritePeriod(p InstantPeriod) error {
|
||||
s.pack(p)
|
||||
return s.responder.AppendRecord(s.arr)
|
||||
}
|
||||
|
||||
func (s *InstantPeriodsWriter) Close() error {
|
||||
return s.responder.Flush()
|
||||
}
|
||||
|
||||
func (s *InstantPeriodsWriter) pack(p InstantPeriod) {
|
||||
bin.PutUint32(s.arr[0:], p.Period)
|
||||
bin.PutUint32(s.arr[4:], p.Since)
|
||||
bin.PutUint32(s.arr[8:], p.Until)
|
||||
|
||||
pos := 12
|
||||
if (s.aggregateFuncs & octopus.AggregateMin) == octopus.AggregateMin {
|
||||
bin.PutFloat64(s.arr[pos:], p.Min)
|
||||
pos += 8
|
||||
}
|
||||
if (s.aggregateFuncs & octopus.AggregateMax) == octopus.AggregateMax {
|
||||
bin.PutFloat64(s.arr[pos:], p.Max)
|
||||
pos += 8
|
||||
}
|
||||
if (s.aggregateFuncs & octopus.AggregateAvg) == octopus.AggregateAvg {
|
||||
bin.PutFloat64(s.arr[pos:], p.Avg)
|
||||
}
|
||||
}
|
||||
|
||||
// CUMULATIVE AGGREGATE WRITER
|
||||
|
||||
// CumulativePeriodsWriter serializes aggregated CumulativePeriod records
// into a fixed 28-byte scratch buffer and streams them via a
// ChunkedResponder.
type CumulativePeriodsWriter struct {
	arr       []byte            // reusable 28-byte record buffer: [Period:4][Since:4][Until:4][EndValue:8][Total:8]
	responder *ChunkedResponder // chunked output to the destination writer
}
|
||||
|
||||
func NewCumulativePeriodsWriter(dst io.Writer) *CumulativePeriodsWriter {
|
||||
return &CumulativePeriodsWriter{
|
||||
arr: make([]byte, 28),
|
||||
responder: NewChunkedResponder(dst),
|
||||
}
|
||||
}
|
||||
|
||||
// CumulativePeriod is one aggregated bucket of cumulative measurements:
// the bucket identifier, the time range covered, the value at the end of
// the period, and the total accumulated over it.
type CumulativePeriod struct {
	Period   uint32  // bucket key (packed as 4 bytes; presumably a period-start timestamp — TODO confirm)
	Since    uint32  // first timestamp that contributed to this bucket
	Until    uint32  // last timestamp that contributed to this bucket
	EndValue float64 // metric value at the end of the period
	Total    float64 // total accumulated within the period
}
|
||||
|
||||
func (s *CumulativePeriodsWriter) BufferMeasure(p CumulativePeriod) {
|
||||
s.pack(p)
|
||||
s.responder.BufferRecord(s.arr)
|
||||
}
|
||||
|
||||
func (s *CumulativePeriodsWriter) WritePeriod(p CumulativePeriod) error {
|
||||
s.pack(p)
|
||||
return s.responder.AppendRecord(s.arr)
|
||||
}
|
||||
|
||||
func (s *CumulativePeriodsWriter) Close() error {
|
||||
return s.responder.Flush()
|
||||
}
|
||||
|
||||
func (s *CumulativePeriodsWriter) pack(p CumulativePeriod) {
|
||||
bin.PutUint32(s.arr[0:], p.Period)
|
||||
bin.PutUint32(s.arr[4:], p.Since)
|
||||
bin.PutUint32(s.arr[8:], p.Until)
|
||||
bin.PutFloat64(s.arr[12:], p.EndValue)
|
||||
bin.PutFloat64(s.arr[20:], p.Total)
|
||||
}
|
||||
|
||||
// CHUNKED RESPONDER
|
||||
|
||||
//const headerSize = 3
|
||||
|
||||
// endMsg is the single-byte end-of-stream marker written by Flush after
// the final chunk.
var endMsg = []byte{
	proto.RespEndOfValue, // end of stream
}
|
||||
|
||||
// ChunkedResponder accumulates fixed-size records into chunks and writes
// them to dst. Each chunk starts with a header — one message-type byte
// followed by a 4-byte record count that is patched in just before the
// chunk is sent (see sendBuffered).
type ChunkedResponder struct {
	recordsQty int           // number of records in the current (unsent) chunk
	buf        *bytes.Buffer // current chunk: header + records
	dst        io.Writer     // destination for completed chunks and the end marker
}
|
||||
|
||||
func NewChunkedResponder(dst io.Writer) *ChunkedResponder {
|
||||
s := &ChunkedResponder{
|
||||
recordsQty: 0,
|
||||
buf: bytes.NewBuffer(nil),
|
||||
dst: dst,
|
||||
}
|
||||
|
||||
s.buf.Write([]byte{
|
||||
proto.RespPartOfValue, // message type
|
||||
0, 0, 0, 0, // records qty
|
||||
})
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *ChunkedResponder) BufferRecord(rec []byte) {
|
||||
s.buf.Write(rec)
|
||||
s.recordsQty++
|
||||
}
|
||||
|
||||
func (s *ChunkedResponder) AppendRecord(rec []byte) error {
|
||||
s.buf.Write(rec)
|
||||
s.recordsQty++
|
||||
|
||||
if s.buf.Len() < 1500 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := s.sendBuffered(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.buf.Write([]byte{
|
||||
proto.RespPartOfValue, // message type
|
||||
0, 0, 0, 0, // records qty
|
||||
})
|
||||
s.recordsQty = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ChunkedResponder) Flush() error {
|
||||
if s.recordsQty > 0 {
|
||||
if err := s.sendBuffered(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if _, err := s.dst.Write(endMsg); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ChunkedResponder) sendBuffered() (err error) {
|
||||
msg := s.buf.Bytes()
|
||||
bin.PutUint32(msg[1:], uint32(s.recordsQty))
|
||||
n, err := s.dst.Write(msg)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if n != len(msg) {
|
||||
return fmt.Errorf("incomplete write %d bytes instead of %d", n, len(msg))
|
||||
}
|
||||
s.buf.Reset()
|
||||
return
|
||||
// AtreeMeasureConsumer receives raw (timestamp, value) pairs read from an
// atree; implementations accumulate or forward each fed measurement.
type AtreeMeasureConsumer interface {
	// Feed consumes one measurement: a timestamp and its value.
	Feed(uint32, float64)
}
|
||||
|
||||
Reference in New Issue
Block a user