Dima Gordenko 3 months ago
parent 0f50873f0f
commit fbb30f31e8
132  README.md
325  atree/aggregate.go
497  atree/atree.go
187  atree/cursor.go
430  atree/io.go
214  atree/misc.go
96   atree/redo/reader.go
207  atree/redo/writer.go
619  atree/select.go
306  atree/writers.go
621  bin/bin.go
138  bufreader/bufreader.go
3    chunkenc/chunckenc.go
330  chunkenc/cumdelta.go
345  chunkenc/insdelta.go
374  chunkenc/time_delta_delta.go
755  client/client.go
459  conbuf/conbuf.go
4    database.ini
1058 database/api.go
460  database/database.go
50   database/helpers.go
71   database/metric.go
1736 database/proc.go
287  database/snapshot.go
BIN  database_linux
BIN  database_windows
88   diploma.go
102  enc/cumdelta.go
3    enc/enc.go
130  enc/insdelta.go
145  enc/time_delta_delta.go
135  examples/database/main.go
377  examples/loadtest/loadtest.go
261  examples/loadtest/main.go
81   examples/requests/generate.go
90   examples/requests/main.go
361  examples/requests/requests.go
72   freelist/freelist.go
13   go.mod
20   go.sum
11   linux_build.sh
9    loadtest.ini
BIN  loadtest_linux
BIN  loadtest_windows
473  proto/proto.go
255  recovery/advisor.go
1    requests.ini
BIN  requests_linux
BIN  requests_windows
39   timeutil/timeutil.go
346  txlog/reader.go
507  txlog/writer.go
11   windows_build.sh

@ -0,0 +1,132 @@
1. A Zip archive containing a database of 841 million measurements (791 MB) can be downloaded from https://drive.google.com/file/d/18oks6STkVpg4-TT2WyCIBBpCgRyF29L2
The archive contains 5 files:
2.changes
2.snapshot
test.data
test.index
metrics.info
After unpacking, move them to the testdir directory.
2. If you want to experiment with an empty database, it is better to create a new one. For example, let's name it "x": create the directory xdir and the file x.ini with the following settings:
tcpPort = 12345
dir = xdir
redoDir = xdir
databaseName = x
Start the DBMS from a terminal:
./database_linux -c x.ini
All operations are performed in the project root directory.
3. The *.ini files:
database.ini - DBMS settings (database);
loadtest.ini - load test settings (loadtest);
requests.ini - request example settings (requests).
4. Compiled software for Linux (64-bit architecture):
database_linux - the DBMS;
loadtest_linux - the load test;
requests_linux - the request examples.
5. Compiled software for Windows (64-bit architecture):
database_windows - the DBMS;
loadtest_windows - the load test;
requests_windows - the request examples.
6. The examples directory has three subdirectories with source code:
database - starting the DBMS;
loadtest - the load test;
requests - the various request types supported by the DBMS.
7. If the Go compiler is installed, the programs mentioned above can be built with the bash scripts:
./linux_build.sh
./windows_build.sh
The compiled binaries end up in the project root directory.
8. The data files live in the datadir directory. The database already holds 841 million measurements, with a total size of slightly over 1.3 GB.
9. The load test settings can be changed by editing loadtest.ini.
The connections option is the number of simultaneously open connections to the DBMS.
The requestsPerConn option is the number of requests a worker thread sends over one open connection.
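For example, a minimal loadtest.ini (hypothetical values; the real file may contain additional options):
connections = 100
requestsPerConn = 500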
The report looks like this:
TEST RESULTS:
Time: 2 seconds
Connections: 100
Requests per conn: 500
Total requests: 50000
AVG request time: 3.121022ms
RPS: 26891
listCumulativeMeasures: 3099 (6.2%), AVG request time: 3.785916ms
listCumulativePeriods: 12055 (24.1%), AVG request time: 2.726391ms
listInstantMeasures: 1974 (3.9%), AVG request time: 6.726605ms
listInstantPeriods: 7710 (15.4%), AVG request time: 2.9808ms
listCurrentValues: 25162 (50.3%), AVG request time: 2.988301ms
last day: 20382 (82.1%), AVG request time: 2.954718ms
last week: 2993 (12.1%), AVG request time: 4.050662ms
last month: 708 (2.9%), AVG request time: 8.248486ms
random time range: 755 (3.0%), AVG request time: 3.540239ms
The load test sends random requests to random metrics. Since in a real project different request types and time ranges occur with different probabilities, the test reflects these probabilities.
Example 1:
listCurrentValues: 25162 (50.3%), AVG request time: 2.988301ms
means that 25162 listCurrentValues requests were sent, making up 50.3% of the total number of requests, and that their average execution time (latency) was 2.99 milliseconds.
Example 2:
last month: 708 (2.9%), AVG request time: 8.248486ms
means that 708 requests for the last month of data were sent, making up 2.9% of the total, with an average latency of 8.25 milliseconds. A time range is specified for every request type except listCurrentValues.
10. Running the load test:
Complete step 1.
Start the DBMS from a terminal:
./database_linux
Start the test from another terminal:
./loadtest_linux
Wait for completion. The report is printed to the terminal when the test finishes.
Windows commands:
./database_windows
./loadtest_windows
11. Running the request examples:
They can be run against an empty database (without downloading the Zip archive of measurements).
Start the DBMS from a terminal:
./database_linux
Run the examples from another terminal:
./requests_linux
The request results are printed to the terminal.
Windows commands:
./database_windows
./requests_windows
12. Show the HELP:
./database_linux -h
./loadtest_linux -h
./requests_linux -h
13. Stop the DBMS or interrupt the load test:
Ctrl + C

@ -0,0 +1,325 @@
package atree
import (
"fmt"
"time"
"gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/timeutil"
)
// AGGREGATE
type InstantAggregator struct {
firstHourOfDay int
lastDayOfMonth int
time2period func(uint32) uint32
currentPeriod uint32
since uint32
until uint32
min float64
max float64
total float64
entries int
}
type InstantAggregatorOptions struct {
GroupBy diploma.GroupBy
FirstHourOfDay int
LastDayOfMonth int
}
func NewInstantAggregator(opt InstantAggregatorOptions) (*InstantAggregator, error) {
s := &InstantAggregator{
firstHourOfDay: opt.FirstHourOfDay,
lastDayOfMonth: opt.LastDayOfMonth,
}
switch opt.GroupBy {
case diploma.GroupByHour:
s.time2period = groupByHour
case diploma.GroupByDay:
if s.firstHourOfDay > 0 {
s.time2period = s.groupByDayUsingFHD
} else {
s.time2period = groupByDay
}
case diploma.GroupByMonth:
if s.firstHourOfDay > 0 {
if s.lastDayOfMonth > 0 {
s.time2period = s.groupByMonthUsingFHDAndLDM
} else {
s.time2period = s.groupByMonthUsingFHD
}
} else {
if s.lastDayOfMonth > 0 {
s.time2period = s.groupByMonthUsingLDM
} else {
s.time2period = groupByMonth
}
}
default:
return nil, fmt.Errorf("unknown groupBy %d option", opt.GroupBy)
}
return s, nil
}
// Data arrives from newest to oldest, so Until is seen first.
// return period complete flag
func (s *InstantAggregator) Feed(timestamp uint32, value float64, p *InstantPeriod) bool {
period := s.time2period(timestamp)
//fmt.Printf("feed: %s %v, period: %s\n", time.Unix(int64(timestamp), 0), value, time.Unix(int64(period), 0))
if s.entries == 0 {
s.currentPeriod = period
s.since = timestamp
s.until = timestamp
s.min = value
s.max = value
s.total = value
s.entries = 1
return false
}
if period != s.currentPeriod {
// a period boundary was crossed - the previous period is complete
s.FillPeriod(timestamp, p)
s.currentPeriod = period
s.since = timestamp
s.until = timestamp
s.min = value
s.max = value
s.total = value
s.entries = 1
return true
}
if value < s.min {
s.min = value
} else if value > s.max {
s.max = value
}
// accumulated for the AVG calculation
s.total += value
s.entries++
// period start (the earliest timestamp seen so far)
s.since = timestamp
return false
}
func (s *InstantAggregator) FillPeriod(prevTimestamp uint32, p *InstantPeriod) bool {
if s.entries == 0 {
return false
}
//fmt.Printf("FillPeriod: %s, prevTimestamp: %s\n", time.Unix(int64(s.currentPeriod), 0), time.Unix(int64(prevTimestamp), 0))
p.Period = s.currentPeriod
if prevTimestamp > 0 {
p.Since = prevTimestamp
} else {
p.Since = s.since
}
p.Until = s.until
p.Min = s.min
p.Max = s.max
p.Avg = s.total / float64(s.entries)
return true
}
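// Usage sketch (illustrative only, not part of this file): measurements are
// fed from newest to oldest; Feed reports true when a period boundary is
// crossed, and a final FillPeriod call flushes the last (possibly partial)
// period. The measures slice and the emit function are hypothetical.
//
//	agg, _ := NewInstantAggregator(InstantAggregatorOptions{
//		GroupBy: diploma.GroupByDay,
//	})
//	var p InstantPeriod
//	for _, m := range measures { // newest to oldest
//		if agg.Feed(m.Timestamp, m.Value, &p) {
//			emit(p) // a completed period
//		}
//	}
//	if agg.FillPeriod(0, &p) {
//		emit(p) // the last, possibly partial, period
//	}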
func (s *InstantAggregator) groupByDayUsingFHD(timestamp uint32) uint32 {
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "d")
if tm.Hour() < s.firstHourOfDay {
tm = tm.AddDate(0, 0, -1)
}
return uint32(tm.Unix())
}
func (s *InstantAggregator) groupByMonthUsingFHD(timestamp uint32) uint32 {
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
if tm.Hour() < s.firstHourOfDay {
tm = tm.AddDate(0, 0, -1)
}
return uint32(tm.Unix())
}
func (s *InstantAggregator) groupByMonthUsingLDM(timestamp uint32) uint32 {
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
if tm.Day() > s.lastDayOfMonth {
tm = tm.AddDate(0, 1, 0)
}
return uint32(tm.Unix())
}
func (s *InstantAggregator) groupByMonthUsingFHDAndLDM(timestamp uint32) uint32 {
// IMPORTANT:
// apply the first-hour-of-day shift before the last-day-of-month shift.
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
if tm.Hour() < s.firstHourOfDay {
tm = tm.AddDate(0, 0, -1)
}
if tm.Day() > s.lastDayOfMonth {
tm = tm.AddDate(0, 1, 0)
}
return uint32(tm.Unix())
}
// CUMULATIVE
type CumulativeAggregator struct {
firstHourOfDay int
lastDayOfMonth int
time2period func(uint32) uint32
currentPeriod uint32
since uint32
until uint32
sinceValue float64
untilValue float64
entries int
}
type CumulativeAggregatorOptions struct {
GroupBy diploma.GroupBy
FirstHourOfDay int
LastDayOfMonth int
}
func NewCumulativeAggregator(opt CumulativeAggregatorOptions) (*CumulativeAggregator, error) {
s := &CumulativeAggregator{
firstHourOfDay: opt.FirstHourOfDay,
lastDayOfMonth: opt.LastDayOfMonth,
}
switch opt.GroupBy {
case diploma.GroupByHour:
s.time2period = groupByHour
case diploma.GroupByDay:
if s.firstHourOfDay > 0 {
s.time2period = s.groupByDayUsingFHD
} else {
s.time2period = groupByDay
}
case diploma.GroupByMonth:
if s.firstHourOfDay > 0 {
if s.lastDayOfMonth > 0 {
s.time2period = s.groupByMonthUsingFHDAndLDM
} else {
s.time2period = s.groupByMonthUsingFHD
}
} else {
if s.lastDayOfMonth > 0 {
s.time2period = s.groupByMonthUsingLDM
} else {
s.time2period = groupByMonth
}
}
default:
return nil, fmt.Errorf("unknown groupBy %d option", opt.GroupBy)
}
return s, nil
}
// return period complete flag
func (s *CumulativeAggregator) Feed(timestamp uint32, value float64, p *CumulativePeriod) bool {
period := s.time2period(timestamp)
if s.entries == 0 {
s.currentPeriod = period
s.since = timestamp
s.until = timestamp
s.sinceValue = value
s.untilValue = value
s.entries = 1
return false
}
if period != s.currentPeriod {
// a period boundary was crossed - the previous period is complete
s.FillPeriod(timestamp, value, p)
s.currentPeriod = period
s.since = timestamp
s.until = timestamp
s.sinceValue = value
s.untilValue = value
s.entries = 1
return true
}
// period start (the earliest timestamp seen so far)
s.since = timestamp
s.sinceValue = value
s.entries++
return false
}
func (s *CumulativeAggregator) FillPeriod(prevTimestamp uint32, value float64, p *CumulativePeriod) bool {
if s.entries == 0 {
return false
}
p.Period = s.currentPeriod
if prevTimestamp > 0 {
p.Since = prevTimestamp
p.Total = s.untilValue - value
} else {
p.Since = s.since
p.Total = s.untilValue - s.sinceValue
}
p.Until = s.until
p.EndValue = s.untilValue
return true
}
func (s *CumulativeAggregator) groupByDayUsingFHD(timestamp uint32) uint32 {
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "d")
if tm.Hour() < s.firstHourOfDay {
tm = tm.AddDate(0, 0, -1)
}
return uint32(tm.Unix())
}
func (s *CumulativeAggregator) groupByMonthUsingFHD(timestamp uint32) uint32 {
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
if tm.Hour() < s.firstHourOfDay {
tm = tm.AddDate(0, 0, -1)
}
return uint32(tm.Unix())
}
func (s *CumulativeAggregator) groupByMonthUsingLDM(timestamp uint32) uint32 {
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
if tm.Day() > s.lastDayOfMonth {
tm = tm.AddDate(0, 1, 0)
}
return uint32(tm.Unix())
}
func (s *CumulativeAggregator) groupByMonthUsingFHDAndLDM(timestamp uint32) uint32 {
// IMPORTANT:
// apply the first-hour-of-day shift before the last-day-of-month shift.
tm := timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m")
if tm.Hour() < s.firstHourOfDay {
tm = tm.AddDate(0, 0, -1)
}
if tm.Day() > s.lastDayOfMonth {
tm = tm.AddDate(0, 1, 0)
}
return uint32(tm.Unix())
}
func groupByHour(timestamp uint32) uint32 {
return uint32(timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "h").Unix())
}
func groupByDay(timestamp uint32) uint32 {
return uint32(timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "d").Unix())
}
func groupByMonth(timestamp uint32) uint32 {
return uint32(timeutil.FirstSecondInPeriod(time.Unix(int64(timestamp), 0), "m").Unix())
}

@ -0,0 +1,497 @@
package atree
import (
"errors"
"fmt"
"os"
"path/filepath"
"sync"
"gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/atree/redo"
"gordenko.dev/dima/diploma/bin"
)
const (
filePerm = 0770
// index page
indexRecordsQtyIdx = IndexPageSize - 7
isDataPageNumbersIdx = IndexPageSize - 5
indexCRC32Idx = IndexPageSize - 4
// data page
timestampsSizeIdx = DataPageSize - 12
valuesSizeIdx = DataPageSize - 10
prevPageIdx = DataPageSize - 8
dataCRC32Idx = DataPageSize - 4
timestampSize = 4
pairSize = timestampSize + PageNoSize
indexFooterIdx = indexRecordsQtyIdx
dataFooterIdx = timestampsSizeIdx
DataPageSize = 8192
IndexPageSize = 1024
PageNoSize = 4
//
DataPagePayloadSize int = dataFooterIdx
)
type FreeList interface {
ReservePage() uint32
}
type _page struct {
PageNo uint32
Buf []byte
ReferenceCount int
}
type Atree struct {
redoDir string
indexFreelist FreeList
dataFreelist FreeList
dataFile *os.File
indexFile *os.File
mutex sync.Mutex
allocatedIndexPagesQty uint32
allocatedDataPagesQty uint32
indexPages map[uint32]*_page
dataPages map[uint32]*_page
indexWaits map[uint32][]chan readResult
dataWaits map[uint32][]chan readResult
indexPagesToRead []uint32
dataPagesToRead []uint32
readSignalCh chan struct{}
writeSignalCh chan struct{}
writeTasksQueue []WriteTask
}
type Options struct {
Dir string
RedoDir string
DatabaseName string
DataFreeList FreeList
IndexFreeList FreeList
}
func New(opt Options) (*Atree, error) {
if opt.Dir == "" {
return nil, errors.New("Dir option is required")
}
if opt.RedoDir == "" {
return nil, errors.New("RedoDir option is required")
}
if opt.DatabaseName == "" {
return nil, errors.New("DatabaseName option is required")
}
if opt.DataFreeList == nil {
return nil, errors.New("DataFreeList option is required")
}
if opt.IndexFreeList == nil {
return nil, errors.New("IndexFreeList option is required")
}
// open or create the dbName.data and dbName.index files
var (
indexFileName = filepath.Join(opt.Dir, opt.DatabaseName+".index")
dataFileName = filepath.Join(opt.Dir, opt.DatabaseName+".data")
indexFile *os.File
dataFile *os.File
allocatedIndexPagesQty uint32
allocatedDataPagesQty uint32
)
// The index is created together with the data file, so the correct
// database state is: either both files exist or neither does.
isIndexExist, err := isFileExist(indexFileName)
if err != nil {
return nil, fmt.Errorf("check index file is exist: %s", err)
}
isDataExist, err := isFileExist(dataFileName)
if err != nil {
return nil, fmt.Errorf("check data file is exist: %s", err)
}
if isIndexExist {
if isDataExist {
// open both files
indexFile, allocatedIndexPagesQty, err = openFile(indexFileName, IndexPageSize)
if err != nil {
return nil, fmt.Errorf("open index file: %s", err)
}
dataFile, allocatedDataPagesQty, err = openFile(dataFileName, DataPageSize)
if err != nil {
return nil, fmt.Errorf("open data file: %s", err)
}
} else {
// the data file is missing
return nil, errors.New("data file not found")
}
} else {
if isDataExist {
// the index file is missing
return nil, errors.New("index file not found")
} else {
// neither file exists
indexFile, err = os.OpenFile(indexFileName, os.O_CREATE|os.O_RDWR, filePerm)
if err != nil {
return nil, err
}
dataFile, err = os.OpenFile(dataFileName, os.O_CREATE|os.O_RDWR, filePerm)
if err != nil {
return nil, err
}
}
}
tree := &Atree{
redoDir: opt.RedoDir,
indexFreelist: opt.IndexFreeList,
dataFreelist: opt.DataFreeList,
indexFile: indexFile,
dataFile: dataFile,
allocatedIndexPagesQty: allocatedIndexPagesQty,
allocatedDataPagesQty: allocatedDataPagesQty,
indexPages: make(map[uint32]*_page),
dataPages: make(map[uint32]*_page),
indexWaits: make(map[uint32][]chan readResult),
dataWaits: make(map[uint32][]chan readResult),
readSignalCh: make(chan struct{}, 1),
writeSignalCh: make(chan struct{}, 1),
}
return tree, nil
}
func (s *Atree) Run() {
go s.pageWriter()
go s.pageReader()
}
// FIND
func (s *Atree) findDataPage(rootPageNo uint32, timestamp uint32) (uint32, []byte, error) {
indexPageNo := rootPageNo
for {
buf, err := s.fetchIndexPage(indexPageNo)
if err != nil {
return 0, nil, fmt.Errorf("fetchIndexPage(%d): %s", indexPageNo, err)
}
foundPageNo := findPageNo(buf, timestamp)
s.releaseIndexPage(indexPageNo)
if buf[isDataPageNumbersIdx] == 1 {
buf, err := s.fetchDataPage(foundPageNo)
if err != nil {
return 0, nil, fmt.Errorf("fetchDataPage(%d): %s", foundPageNo, err)
}
return foundPageNo, buf, nil
}
// descend one level
indexPageNo = foundPageNo
}
}
type pathLeg struct {
PageNo uint32
Data []byte
}
type pathToDataPage struct {
Legs []pathLeg
LastPageNo uint32
}
func (s *Atree) findPathToLastPage(rootPageNo uint32) (_ pathToDataPage, err error) {
var (
pageNo = rootPageNo
legs []pathLeg
)
for {
var buf []byte
buf, err = s.fetchIndexPage(pageNo)
if err != nil {
err = fmt.Errorf("FetchIndexPage(%d): %s", pageNo, err)
return
}
legs = append(legs, pathLeg{
PageNo: pageNo,
Data: buf,
// childIdx is not needed
})
foundPageNo := getLastPageNo(buf)
if buf[isDataPageNumbersIdx] == 1 {
return pathToDataPage{
Legs: legs,
LastPageNo: foundPageNo,
}, nil
}
// descend one level
pageNo = foundPageNo
}
}
// APPEND DATA PAGE
type AppendDataPageReq struct {
MetricID uint32
Timestamp uint32
Value float64
Since uint32
RootPageNo uint32
PrevPageNo uint32
TimestampsChunks [][]byte
TimestampsSize uint16
ValuesChunks [][]byte
ValuesSize uint16
}
func (s *Atree) AppendDataPage(req AppendDataPageReq) (_ redo.Report, err error) {
var (
flags byte
dataPagesToRelease []uint32
indexPagesToRelease []uint32
)
newDataPage := s.allocDataPage()
dataPagesToRelease = append(dataPagesToRelease, newDataPage.PageNo)
chunksToDataPage(newDataPage.Data, chunksToDataPageReq{
PrevPageNo: req.PrevPageNo,
TimestampsChunks: req.TimestampsChunks,
TimestampsSize: req.TimestampsSize,
ValuesChunks: req.ValuesChunks,
ValuesSize: req.ValuesSize,
})
redoWriter, err := redo.NewWriter(redo.WriterOptions{
Dir: s.redoDir,
MetricID: req.MetricID,
Timestamp: req.Timestamp,
Value: req.Value,
IsDataPageReused: newDataPage.IsReused,
DataPageNo: newDataPage.PageNo,
Page: newDataPage.Data,
})
if err != nil {
return
}
if req.RootPageNo > 0 {
var path pathToDataPage
path, err = s.findPathToLastPage(req.RootPageNo)
if err != nil {
return
}
for _, leg := range path.Legs {
indexPagesToRelease = append(indexPagesToRelease, leg.PageNo)
}
if path.LastPageNo != req.PrevPageNo {
diploma.Abort(
diploma.WrongPrevPageNo,
fmt.Errorf("bug: last pageNo %d in tree != prev pageNo %d in _metric",
path.LastPageNo, req.PrevPageNo),
)
}
newPageNo := newDataPage.PageNo
lastIdx := len(path.Legs) - 1
for legIdx := lastIdx; legIdx >= 0; legIdx-- {
leg := path.Legs[legIdx]
ok := appendPair(leg.Data, req.Since, newPageNo)
if ok {
err = redoWriter.AppendIndexPage(leg.PageNo, leg.Data, 0)
if err != nil {
return
}
break
}
newIndexPage := s.allocIndexPage()
indexPagesToRelease = append(indexPagesToRelease, newIndexPage.PageNo)
appendPair(newIndexPage.Data, req.Since, newPageNo)
// mark that all pageNo entries on this page point to data pages
if legIdx == lastIdx {
newIndexPage.Data[isDataPageNumbersIdx] = 1
}
flags = 0
if newIndexPage.IsReused {
flags |= redo.FlagReused
}
err = redoWriter.AppendIndexPage(newIndexPage.PageNo, newIndexPage.Data, flags)
if err != nil {
return
}
//
newPageNo = newIndexPage.PageNo
if legIdx == 0 {
newRoot := s.allocIndexPage()
indexPagesToRelease = append(indexPagesToRelease, newRoot.PageNo)
appendPair(newRoot.Data, getSince(leg.Data), leg.PageNo) // old rootPageNo
appendPair(newRoot.Data, req.Since, newIndexPage.PageNo)
// record the new root in the REDO log
flags = redo.FlagNewRoot
if newRoot.IsReused {
flags |= redo.FlagReused
}
err = redoWriter.AppendIndexPage(newRoot.PageNo, newRoot.Data, flags)
if err != nil {
return
}
break
}
}
} else {
newRoot := s.allocIndexPage()
indexPagesToRelease = append(indexPagesToRelease, newRoot.PageNo)
newRoot.Data[isDataPageNumbersIdx] = 1
appendPair(newRoot.Data, req.Since, newDataPage.PageNo)
flags = redo.FlagNewRoot
if newRoot.IsReused {
flags |= redo.FlagReused
}
err = redoWriter.AppendIndexPage(newRoot.PageNo, newRoot.Data, flags)
if err != nil {
return
}
}
err = redoWriter.Close()
if err != nil {
return
}
// At this point the scheme is as follows. All pages - data and index - are pinned
// in the cache, so the disk write proceeds as fast as possible. Afterwards the
// ReferenceCount of each page is decremented by 1. Since an XLock is held on the
// metric, each page has ReferenceCount = 1 (there are no other readers).
waitCh := make(chan struct{})
task := WriteTask{
WaitCh: waitCh,
DataPage: redo.PageToWrite{
PageNo: newDataPage.PageNo,
Data: newDataPage.Data,
},
IndexPages: redoWriter.IndexPagesToWrite(),
}
s.appendWriteTaskToQueue(task)
<-waitCh
for _, pageNo := range dataPagesToRelease {
s.releaseDataPage(pageNo)
}
for _, pageNo := range indexPagesToRelease {
s.releaseIndexPage(pageNo)
}
return redoWriter.GetReport(), nil
}
// DELETE
type PageLists struct {
DataPages []uint32
IndexPages []uint32
}
type Level struct {
PageNo uint32
PageData []byte
Idx int
ChildQty int
}
func (s *Atree) GetAllPages(rootPageNo uint32) (_ PageLists, err error) {
var (
dataPages []uint32
indexPages []uint32
levels []*Level
)
buf, err := s.fetchIndexPage(rootPageNo)
if err != nil {
err = fmt.Errorf("fetchIndexPage(%d): %s", rootPageNo, err)
return
}
indexPages = append(indexPages, rootPageNo)
if buf[isDataPageNumbersIdx] == 1 {
pageNumbers := listPageNumbers(buf)
dataPages = append(dataPages, pageNumbers...)
s.releaseIndexPage(rootPageNo)
return PageLists{
DataPages: dataPages,
IndexPages: indexPages,
}, nil
}
levels = append(levels, &Level{
PageNo: rootPageNo,
PageData: buf,
Idx: 0,
ChildQty: bin.GetUint16AsInt(buf[indexRecordsQtyIdx:]),
})
for {
if len(levels) == 0 {
return PageLists{
DataPages: dataPages,
IndexPages: indexPages,
}, nil
}
lastIdx := len(levels) - 1
level := levels[lastIdx]
if level.Idx < level.ChildQty {
pageNo := getPageNo(level.PageData, level.Idx)
level.Idx++
var buf []byte
buf, err = s.fetchIndexPage(pageNo)
if err != nil {
err = fmt.Errorf("fetchIndexPage(%d): %s", pageNo, err)
return
}
indexPages = append(indexPages, pageNo)
if buf[isDataPageNumbersIdx] == 1 {
pageNumbers := listPageNumbers(buf)
dataPages = append(dataPages, pageNumbers...)
s.releaseIndexPage(pageNo)
} else {
levels = append(levels, &Level{
PageNo: pageNo,
PageData: buf,
Idx: 0,
ChildQty: bin.GetUint16AsInt(buf[indexRecordsQtyIdx:]),
})
}
} else {
s.releaseIndexPage(level.PageNo)
levels = levels[:lastIdx]
}
}
}

@ -0,0 +1,187 @@
package atree
import (
"errors"
"fmt"
octopus "gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/bin"
"gordenko.dev/dima/diploma/enc"
)
type BackwardCursor struct {
metricType octopus.MetricType
fracDigits byte
atree *Atree
pageNo uint32
pageData []byte
timestampDecompressor octopus.TimestampDecompressor
valueDecompressor octopus.ValueDecompressor
}
type BackwardCursorOptions struct {
MetricType octopus.MetricType
FracDigits byte
PageNo uint32
PageData []byte
Atree *Atree
}
func NewBackwardCursor(opt BackwardCursorOptions) (*BackwardCursor, error) {
switch opt.MetricType {
case octopus.Instant, octopus.Cumulative:
// ok
default:
return nil, fmt.Errorf("MetricType option has wrong value: %d", opt.MetricType)
}
if opt.FracDigits > octopus.MaxFracDigits {
return nil, errors.New("FracDigits option is required")
}
if opt.Atree == nil {
return nil, errors.New("Atree option is required")
}
if opt.PageNo == 0 {
return nil, errors.New("PageNo option is required")
}
if len(opt.PageData) == 0 {
return nil, errors.New("PageData option is required")
}
s := &BackwardCursor{
metricType: opt.MetricType,
fracDigits: opt.FracDigits,
atree: opt.Atree,
pageNo: opt.PageNo,
pageData: opt.PageData,
}
err := s.makeDecompressors()
if err != nil {
return nil, err
}
return s, nil
}
// timestamp, value, done, error
func (s *BackwardCursor) Prev() (uint32, float64, bool, error) {
var (
timestamp uint32
value float64
done bool
err error
)
timestamp, done = s.timestampDecompressor.NextValue()
if !done {
value, done = s.valueDecompressor.NextValue()
if done {
return 0, 0, false,
fmt.Errorf("corrupted data page %d: has timestamp, no value",
s.pageNo)
}
return timestamp, value, false, nil
}
prevPageNo := bin.GetUint32(s.pageData[prevPageIdx:])
if prevPageNo == 0 {
return 0, 0, true, nil
}
s.atree.releaseDataPage(s.pageNo)
s.pageNo = prevPageNo
s.pageData, err = s.atree.fetchDataPage(s.pageNo)
if err != nil {
return 0, 0, false, fmt.Errorf("atree.fetchDataPage(%d): %s", s.pageNo, err)
}
err = s.makeDecompressors()
if err != nil {
return 0, 0, false, err
}
timestamp, done = s.timestampDecompressor.NextValue()
if done {
return 0, 0, false,
fmt.Errorf("corrupted data page %d: no timestamps",
s.pageNo)
}
value, done = s.valueDecompressor.NextValue()
if done {
return 0, 0, false,
fmt.Errorf("corrupted data page %d: no values",
s.pageNo)
}
return timestamp, value, false, nil
}
func (s *BackwardCursor) Close() {
s.atree.releaseDataPage(s.pageNo)
}
// HELPER
func (s *BackwardCursor) makeDecompressors() error {
timestampsPayloadSize := bin.GetUint16(s.pageData[timestampsSizeIdx:])
valuesPayloadSize := bin.GetUint16(s.pageData[valuesSizeIdx:])
// sum as int to avoid uint16 overflow
payloadSize := int(timestampsPayloadSize) + int(valuesPayloadSize)
if payloadSize > dataFooterIdx {
return fmt.Errorf("corrupted data page %d: timestamps + values size %d exceeds payload size %d",
s.pageNo, payloadSize, dataFooterIdx)
}
s.timestampDecompressor = enc.NewReverseTimeDeltaOfDeltaDecompressor(
s.pageData[:timestampsPayloadSize],
)
vbuf := s.pageData[timestampsPayloadSize : timestampsPayloadSize+valuesPayloadSize]
switch s.metricType {
case octopus.Instant:
s.valueDecompressor = enc.NewReverseInstantDeltaDecompressor(
vbuf, s.fracDigits)
case octopus.Cumulative:
s.valueDecompressor = enc.NewReverseCumulativeDeltaDecompressor(
vbuf, s.fracDigits)
default:
return fmt.Errorf("bug: wrong metricType %d", s.metricType)
}
return nil
}
func makeDecompressors(pageData []byte, metricType octopus.MetricType, fracDigits byte) (
octopus.TimestampDecompressor, octopus.ValueDecompressor, error,
) {
timestampsPayloadSize := bin.GetUint16(pageData[timestampsSizeIdx:])
valuesPayloadSize := bin.GetUint16(pageData[valuesSizeIdx:])
// sum as int to avoid uint16 overflow
payloadSize := int(timestampsPayloadSize) + int(valuesPayloadSize)
if payloadSize > dataFooterIdx {
return nil, nil, fmt.Errorf("corrupted: timestamps + values size %d exceeds payload size %d",
payloadSize, dataFooterIdx)
}
timestampDecompressor := enc.NewReverseTimeDeltaOfDeltaDecompressor(
pageData[:timestampsPayloadSize],
)
vbuf := pageData[timestampsPayloadSize : timestampsPayloadSize+valuesPayloadSize]
var valueDecompressor octopus.ValueDecompressor
switch metricType {
case octopus.Instant:
valueDecompressor = enc.NewReverseInstantDeltaDecompressor(
vbuf, fracDigits)
case octopus.Cumulative:
valueDecompressor = enc.NewReverseCumulativeDeltaDecompressor(
vbuf, fracDigits)
default:
return nil, nil, fmt.Errorf("bug: wrong metricType %d", metricType)
}
return timestampDecompressor, valueDecompressor, nil
}

@ -0,0 +1,430 @@
package atree
import (
"errors"
"fmt"
"hash/crc32"
"io/fs"
"math"
"os"
octopus "gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/atree/redo"
"gordenko.dev/dima/diploma/bin"
)
type AllocatedPage struct {
PageNo uint32
Data []byte
IsReused bool
}
type readResult struct {
Data []byte
Err error
}
// INDEX PAGES
func (s *Atree) DeleteIndexPages(pageNumbers []uint32) {
s.mutex.Lock()
for _, pageNo := range pageNumbers {
delete(s.indexPages, pageNo)
}
s.mutex.Unlock()
}
func (s *Atree) fetchIndexPage(pageNo uint32) ([]byte, error) {
s.mutex.Lock()
p, ok := s.indexPages[pageNo]
if ok {
p.ReferenceCount++
s.mutex.Unlock()
return p.Buf, nil
}
resultCh := make(chan readResult, 1)
s.indexWaits[pageNo] = append(s.indexWaits[pageNo], resultCh)
if len(s.indexWaits[pageNo]) == 1 {
s.indexPagesToRead = append(s.indexPagesToRead, pageNo)
s.mutex.Unlock()
select {
case s.readSignalCh <- struct{}{}:
default:
}
} else {
s.mutex.Unlock()
}
result := <-resultCh
if result.Err == nil {
result.Err = s.verifyCRC(result.Data, IndexPageSize)
}
return result.Data, result.Err
}
func (s *Atree) releaseIndexPage(pageNo uint32) {
s.mutex.Lock()
defer s.mutex.Unlock()
p, ok := s.indexPages[pageNo]
if ok {
if p.ReferenceCount > 0 {
p.ReferenceCount--
return
} else {
octopus.Abort(
octopus.ReferenceCountBug,
fmt.Errorf("call releaseIndexPage on page %d with reference count = %d",
pageNo, p.ReferenceCount),
)
}
}
}
func (s *Atree) allocIndexPage() AllocatedPage {
var (
allocated = AllocatedPage{
Data: make([]byte, IndexPageSize),
}
)
allocated.PageNo = s.indexFreelist.ReservePage()
if allocated.PageNo > 0 {
allocated.IsReused = true
s.mutex.Lock()
} else {
s.mutex.Lock()
if s.allocatedIndexPagesQty == math.MaxUint32 {
octopus.Abort(octopus.MaxAtreeSizeExceeded,
errors.New("no space in Atree index"))
}
s.allocatedIndexPagesQty++
allocated.PageNo = s.allocatedIndexPagesQty
}
s.indexPages[allocated.PageNo] = &_page{
PageNo: allocated.PageNo,
Buf: allocated.Data,
ReferenceCount: 1,
}
s.mutex.Unlock()
return allocated
}
// DATA PAGES
func (s *Atree) DeleteDataPages(pageNumbers []uint32) {
s.mutex.Lock()
for _, pageNo := range pageNumbers {
delete(s.dataPages, pageNo)
}
s.mutex.Unlock()
}
func (s *Atree) fetchDataPage(pageNo uint32) ([]byte, error) {
s.mutex.Lock()
p, ok := s.dataPages[pageNo]
if ok {
p.ReferenceCount++
s.mutex.Unlock()
return p.Buf, nil
}
resultCh := make(chan readResult, 1)
s.dataWaits[pageNo] = append(s.dataWaits[pageNo], resultCh)
if len(s.dataWaits[pageNo]) == 1 {
s.dataPagesToRead = append(s.dataPagesToRead, pageNo)
s.mutex.Unlock()
select {
case s.readSignalCh <- struct{}{}:
default:
}
} else {
s.mutex.Unlock()
}
result := <-resultCh
if result.Err == nil {
result.Err = s.verifyCRC(result.Data, DataPageSize)
}
return result.Data, result.Err
}
func (s *Atree) releaseDataPage(pageNo uint32) {
s.mutex.Lock()
defer s.mutex.Unlock()
p, ok := s.dataPages[pageNo]
if ok {
if p.ReferenceCount > 0 {
p.ReferenceCount--
return
} else {
octopus.Abort(
octopus.ReferenceCountBug,
fmt.Errorf("call releaseDataPage on page %d with reference count = %d",
pageNo, p.ReferenceCount),
)
}
}
}
func (s *Atree) allocDataPage() AllocatedPage {
var (
allocated = AllocatedPage{
Data: make([]byte, DataPageSize),
}
)
allocated.PageNo = s.dataFreelist.ReservePage()
if allocated.PageNo > 0 {
allocated.IsReused = true
s.mutex.Lock()
} else {
s.mutex.Lock()
if s.allocatedDataPagesQty == math.MaxUint32 {
octopus.Abort(octopus.MaxAtreeSizeExceeded,
errors.New("no space in Atree index"))
}
s.allocatedDataPagesQty++
allocated.PageNo = s.allocatedDataPagesQty
}
s.dataPages[allocated.PageNo] = &_page{
PageNo: allocated.PageNo,
Buf: allocated.Data,
ReferenceCount: 1,
}
s.mutex.Unlock()
return allocated
}
// READ
func (s *Atree) pageReader() {
for {
select {
case <-s.readSignalCh:
s.readPages()
}
}
}
func (s *Atree) readPages() {
s.mutex.Lock()
if len(s.indexPagesToRead) == 0 && len(s.dataPagesToRead) == 0 {
s.mutex.Unlock()
return
}
indexPagesToRead := s.indexPagesToRead
s.indexPagesToRead = nil
dataPagesToRead := s.dataPagesToRead
s.dataPagesToRead = nil
s.mutex.Unlock()
for _, pageNo := range dataPagesToRead {
buf := make([]byte, DataPageSize)
off := int64(pageNo-1) * DataPageSize // int64 to avoid uint32 overflow on large files
n, err := s.dataFile.ReadAt(buf, off)
if n != DataPageSize {
err = fmt.Errorf("read %d instead of %d", n, DataPageSize)
}
s.mutex.Lock()
resultChannels := s.dataWaits[pageNo]
delete(s.dataWaits, pageNo)
if err != nil {
s.mutex.Unlock()
for _, resultCh := range resultChannels {
resultCh <- readResult{
Err: err,
}
}
} else {
s.dataPages[pageNo] = &_page{
PageNo: pageNo,
Buf: buf,
ReferenceCount: len(resultChannels),
}
s.mutex.Unlock()
for _, resultCh := range resultChannels {
resultCh <- readResult{
Data: buf,
}
}
}
}
for _, pageNo := range indexPagesToRead {
buf := make([]byte, IndexPageSize)
off := int64(pageNo-1) * IndexPageSize // int64 to avoid uint32 overflow on large files
n, err := s.indexFile.ReadAt(buf, off)
if n != IndexPageSize {
err = fmt.Errorf("read %d instead of %d", n, IndexPageSize)
}
s.mutex.Lock()
resultChannels := s.indexWaits[pageNo]
delete(s.indexWaits, pageNo)
if err != nil {
s.mutex.Unlock()
for _, resultCh := range resultChannels {
resultCh <- readResult{
Err: err,
}
}
} else {
s.indexPages[pageNo] = &_page{
PageNo: pageNo,
Buf: buf,
ReferenceCount: len(resultChannels),
}
s.mutex.Unlock()
for _, resultCh := range resultChannels {
resultCh <- readResult{
Data: buf,
}
}
}
}
}
// WRITE
func (s *Atree) pageWriter() {
for {
select {
case <-s.writeSignalCh:
err := s.writeTasks()
if err != nil {
octopus.Abort(octopus.WriteToAtreeFailed, err)
}
}
}
}
type WriteTask struct {
WaitCh chan struct{}
DataPage redo.PageToWrite
IndexPages []redo.PageToWrite
}
func (s *Atree) appendWriteTaskToQueue(task WriteTask) {
s.mutex.Lock()
s.writeTasksQueue = append(s.writeTasksQueue, task)
s.mutex.Unlock()
select {
case s.writeSignalCh <- struct{}{}:
default:
}
}
func (s *Atree) writeTasks() error {
s.mutex.Lock()
tasks := s.writeTasksQueue
s.writeTasksQueue = nil
s.mutex.Unlock()
for _, task := range tasks {
// data page
p := task.DataPage
if len(p.Data) != DataPageSize {
return fmt.Errorf("wrong data page %d size: %d",
p.PageNo, len(p.Data))
}
off := int64(p.PageNo-1) * DataPageSize // int64 to avoid uint32 overflow
n, err := s.dataFile.WriteAt(p.Data, off)
if err != nil {
return err
}
if n != len(p.Data) {
return fmt.Errorf("write %d instead of %d", n, len(p.Data))
}
// index pages
for _, p := range task.IndexPages {
if len(p.Data) != IndexPageSize {
return fmt.Errorf("wrong index page %d size: %d",
p.PageNo, len(p.Data))
}
bin.PutUint32(p.Data[indexCRC32Idx:], crc32.ChecksumIEEE(p.Data[:indexCRC32Idx]))
off := int64(p.PageNo-1) * IndexPageSize // int64 to avoid uint32 overflow
n, err := s.indexFile.WriteAt(p.Data, off)
if err != nil {
return err
}
if n != len(p.Data) {
return fmt.Errorf("write %d instead of %d", n, len(p.Data))
}
}
close(task.WaitCh)
}
return nil
}
// IO
func isFileExist(fileName string) (bool, error) {
_, err := os.Stat(fileName)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return false, nil
} else {
return false, err
}
} else {
return true, nil
}
}
func openFile(fileName string, pageSize int) (_ *os.File, _ uint32, err error) {
file, err := os.OpenFile(fileName, os.O_RDWR, filePerm)
if err != nil {
return
}
fi, err := file.Stat()
if err != nil {
file.Close()
return
}
fileSize := fi.Size()
if (fileSize % int64(pageSize)) > 0 {
file.Close()
err = fmt.Errorf("the file size %d is not a multiple of the page size %d",
fileSize, pageSize)
return
}
allocatedPagesQty := fileSize / int64(pageSize)
if allocatedPagesQty > math.MaxUint32 {
file.Close()
err = fmt.Errorf("allocated pages %d is > max pages %d",
allocatedPagesQty, math.MaxUint32)
return
}
return file, uint32(allocatedPagesQty), nil
}
func (s *Atree) ApplyREDO(task WriteTask) {
s.appendWriteTaskToQueue(task)
}
func (s *Atree) verifyCRC(data []byte, pageSize int) error {
var (
pos = pageSize - 4
calculatedCRC = crc32.ChecksumIEEE(data[:pos])
storedCRC = bin.GetUint32(data[pos:])
)
if calculatedCRC != storedCRC {
return fmt.Errorf("calculatedCRC %d not equal storedCRC %d",
calculatedCRC, storedCRC)
}
return nil
}

@ -0,0 +1,214 @@
package atree
import (
"hash/crc32"
"gordenko.dev/dima/diploma/bin"
)
type ValueAtComparator struct {
buf []byte
timestamp uint32
}
func (s ValueAtComparator) CompareTo(elemIdx int) int {
var (
pos = elemIdx * timestampSize
elem = bin.GetUint32(s.buf[pos:])
)
if s.timestamp < elem {
return -1
} else if s.timestamp > elem {
return 1
} else {
return 0
}
}
func BinarySearch(qty int, keyComparator bin.KeyComparator) (elemIdx int, isFound bool) {
if qty == 0 {
return
}
a := 0
b := qty - 1
for {
var (
elemIdx = (b-a)/2 + a
code = keyComparator.CompareTo(elemIdx)
)
if code == 1 {
a = elemIdx + 1
if a > b {
return elemIdx, false // key is greater than all remaining elements; elemIdx is the nearest preceding one
}
} else if code == -1 {
b = elemIdx - 1
if b < a {
if elemIdx == 0 {
return 0, false
} else {
return elemIdx - 1, false
}
}
} else {
return elemIdx, true
}
}
}
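// Usage sketch (illustrative only, not part of this file): BinarySearch works
// with any bin.KeyComparator; ValueAtComparator above searches the timestamp
// array at the front of an index page. ts is a hypothetical search key.
//
//	qty := bin.GetUint16AsInt(buf[indexRecordsQtyIdx:])
//	elemIdx, found := BinarySearch(qty, ValueAtComparator{buf: buf, timestamp: ts})
//	// When found is false, elemIdx is the greatest element whose timestamp
//	// does not exceed ts (clamped to 0 when ts precedes every element).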
type chunksToDataPageReq struct {
PrevPageNo uint32
TimestampsChunks [][]byte
TimestampsSize uint16
ValuesChunks [][]byte
ValuesSize uint16
}
func chunksToDataPage(buf []byte, req chunksToDataPageReq) {
bin.PutUint32(buf[prevPageIdx:], req.PrevPageNo)
bin.PutUint16(buf[timestampsSizeIdx:], req.TimestampsSize)
bin.PutUint16(buf[valuesSizeIdx:], req.ValuesSize)
var (
remainingSize = int(req.TimestampsSize)
pos = 0
)
for _, chunk := range req.TimestampsChunks {
if remainingSize >= len(chunk) {
copy(buf[pos:], chunk)
remainingSize -= len(chunk)
pos += len(chunk)
} else {
copy(buf[pos:], chunk[:remainingSize])
break
}
}
remainingSize = int(req.ValuesSize)
pos = int(req.TimestampsSize)
for _, chunk := range req.ValuesChunks {
if remainingSize >= len(chunk) {
copy(buf[pos:], chunk)
remainingSize -= len(chunk)
pos += len(chunk)
} else {
copy(buf[pos:], chunk[:remainingSize])
break
}
}
bin.PutUint32(buf[dataCRC32Idx:], crc32.ChecksumIEEE(buf[:dataCRC32Idx]))
}
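// Data page layout (DataPageSize = 8192), as implied by the offsets used
// above (descriptive sketch, not source):
//
//	[0 .. TimestampsSize)                          compressed timestamps
//	[TimestampsSize .. TimestampsSize+ValuesSize)  compressed values
//	footer:
//	  timestampsSizeIdx = DataPageSize-12  uint16 timestamps payload size
//	  valuesSizeIdx     = DataPageSize-10  uint16 values payload size
//	  prevPageIdx       = DataPageSize-8   uint32 previous data page number
//	  dataCRC32Idx      = DataPageSize-4   uint32 CRC32 of bytes [0 .. DataPageSize-4)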
func setPrevPageNo(buf []byte, pageNo uint32) {
bin.PutUint32(buf[prevPageIdx:], pageNo)
}
func getPrevPageNo(buf []byte) uint32 {
return bin.GetUint32(buf[prevPageIdx:])
}
func findPageNo(buf []byte, timestamp uint32) (pageNo uint32) {
var (
qty = bin.GetUint16AsInt(buf[indexRecordsQtyIdx:])
comparator = ValueAtComparator{
buf: buf,
timestamp: timestamp,
}
)
elemIdx, _ := BinarySearch(qty, comparator)
pos := indexFooterIdx - (elemIdx+1)*PageNoSize
return bin.GetUint32(buf[pos:])
}
func findPageNoIdx(buf []byte, timestamp uint32) (idx int) {
var (
qty = bin.GetUint16AsInt(buf[indexRecordsQtyIdx:])
comparator = ValueAtComparator{
buf: buf,
timestamp: timestamp,
}
)
elemIdx, _ := BinarySearch(qty, comparator)
return elemIdx
}
func findPageNoForDeleteSince(buf []byte, since uint32) (uint32, int, int) {
var (
qty = bin.GetUint16AsInt(buf[indexRecordsQtyIdx:])
comparator = ValueAtComparator{
buf: buf,
timestamp: since,
}
)
elemIdx, _ := BinarySearch(qty, comparator)
pos := elemIdx * timestampSize
timestamp := bin.GetUint32(buf[pos:])
if timestamp == since {
if elemIdx == 0 {
return 0, qty, -1
}
elemIdx--
}
pos = indexFooterIdx - (elemIdx+1)*PageNoSize
return bin.GetUint32(buf[pos:]), qty, elemIdx
}
func getLastPageNo(buf []byte) (pageNo uint32) {
qty := bin.GetUint16AsInt(buf[indexRecordsQtyIdx:])
pos := indexFooterIdx - qty*PageNoSize
return bin.GetUint32(buf[pos:])
}
func getPageNo(buf []byte, idx int) (pageNo uint32) {
pos := indexFooterIdx - (idx+1)*PageNoSize
return bin.GetUint32(buf[pos:])
}
func listPageNumbers(buf []byte) (pageNumbers []uint32) {
qty := bin.GetUint16AsInt(buf[indexRecordsQtyIdx:])
pos := indexFooterIdx - PageNoSize
for range qty {
pageNumbers = append(pageNumbers, bin.GetUint32(buf[pos:]))
pos -= PageNoSize
}
return
}
// include since timestamp
func listPageNumbersSince(buf []byte, timestamp uint32) (pageNumbers []uint32) {
var (
qty = bin.GetUint16AsInt(buf[indexRecordsQtyIdx:])
comparator = ValueAtComparator{
buf: buf,
timestamp: timestamp,
}
)
elemIdx, _ := BinarySearch(qty, comparator)
pos := indexFooterIdx - (elemIdx+1)*PageNoSize
// iterate only over elements elemIdx..qty-1 (the pages covering since and later)
for i := elemIdx; i < qty; i++ {
pageNumbers = append(pageNumbers, bin.GetUint32(buf[pos:]))
pos -= PageNoSize
}
return
}
func getSince(buf []byte) uint32 {
return bin.GetUint32(buf[0:])
}
func appendPair(buf []byte, timestamp uint32, pageNo uint32) bool {
qty := bin.GetUint16AsInt(buf[indexRecordsQtyIdx:])
free := indexFooterIdx - qty*pairSize
if free < pairSize {
return false
}
pos := qty * timestampSize
bin.PutUint32(buf[pos:], timestamp)
pos = indexFooterIdx - (qty+1)*PageNoSize
bin.PutUint32(buf[pos:], pageNo)
bin.PutIntAsUint16(buf[indexRecordsQtyIdx:], qty+1)
return true
}
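// Index page layout (IndexPageSize = 1024), as implied by appendPair and
// getPageNo (descriptive sketch, not source): timestamps grow forward from
// the start of the page, page numbers grow backward from the footer, and
// pair i is (timestamp at i*4, pageNo at indexFooterIdx-(i+1)*4).
//
//	[0 .. qty*4)                                 timestamps, uint32, grow forward
//	[indexFooterIdx - qty*4 .. indexFooterIdx)   page numbers, uint32, grow backward
//	footer:
//	  indexRecordsQtyIdx   = IndexPageSize-7  uint16 record count
//	  isDataPageNumbersIdx = IndexPageSize-5  1 if the pageNos point to data pages
//	  indexCRC32Idx        = IndexPageSize-4  uint32 CRC32 of bytes [0 .. IndexPageSize-4)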

@ -0,0 +1,96 @@
package redo
import (
"fmt"
"hash/crc32"
"io"
"os"
"gordenko.dev/dima/diploma/bin"
)
type REDOFile struct {
MetricID uint32
Timestamp uint32
Value float64
IsDataPageReused bool
DataPage PageToWrite
IsRootChanged bool
RootPageNo uint32
ReusedIndexPages []uint32
IndexPages []PageToWrite
}
type ReadREDOFileReq struct {
FileName string
DataPageSize int
IndexPageSize int
}
func ReadREDOFile(req ReadREDOFileReq) (*REDOFile, error) {
buf, err := os.ReadFile(req.FileName)
if err != nil {
return nil, err
}
if len(buf) < 25 {
return nil, io.EOF
}
var (
end = len(buf) - 4
payload = buf[:end]
checksum = bin.GetUint32(buf[end:])
calculatedChecksum = crc32.ChecksumIEEE(payload)
)
// A checksum mismatch means the file is either incomplete or corrupted
if checksum != calculatedChecksum {
return nil, fmt.Errorf("written checksum %d not equal calculated checksum %d",
checksum, calculatedChecksum)
}
var (
redoLog = REDOFile{
MetricID: bin.GetUint32(buf[0:]),
Timestamp: bin.GetUint32(buf[4:]),
Value: bin.GetFloat64(buf[8:]),
IsDataPageReused: buf[16] == 1,
DataPage: PageToWrite{
PageNo: bin.GetUint32(buf[17:]),
Data: buf[21 : 21+req.DataPageSize],
},
}
pos = 21 + req.DataPageSize
)
for {
if pos == len(payload) {
return &redoLog, nil
}
if pos > len(payload) {
return nil, io.EOF
}
flags := buf[pos]
item := PageToWrite{
PageNo: bin.GetUint32(buf[pos+1:]),
}
pos += 5 // flags + pageNo
item.Data = buf[pos : pos+req.IndexPageSize]
pos += req.IndexPageSize
redoLog.IndexPages = append(redoLog.IndexPages, item)
if (flags & FlagReused) == FlagReused {
redoLog.ReusedIndexPages = append(redoLog.ReusedIndexPages, item.PageNo)
}
if (flags & FlagNewRoot) == FlagNewRoot {
redoLog.IsRootChanged = true
redoLog.RootPageNo = item.PageNo
}
}
}
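// Recovery sketch (illustrative only, not part of this file): after a crash
// the REDO file for a metric can be re-read and its pages re-applied. The
// names outside this package (atree.DataPageSize, atree.IndexPageSize,
// atree.WriteTask, tree.ApplyREDO) are the ones visible elsewhere in this
// commit; the real caller lives outside both packages.
//
//	f, err := ReadREDOFile(ReadREDOFileReq{
//		FileName:      JoinREDOFileName(dir, metricID),
//		DataPageSize:  atree.DataPageSize,
//		IndexPageSize: atree.IndexPageSize,
//	})
//	if err != nil { /* incomplete or corrupted file: discard it */ }
//	task := atree.WriteTask{
//		WaitCh:     make(chan struct{}),
//		DataPage:   f.DataPage,
//		IndexPages: f.IndexPages,
//	}
//	tree.ApplyREDO(task)
//	<-task.WaitCh // closed once the pages hit the data and index files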

@ -0,0 +1,207 @@
package redo
import (
"errors"
"fmt"
"hash"
"hash/crc32"
"os"
"path/filepath"
"gordenko.dev/dima/diploma/bin"
)
const (
FlagReused byte = 1 // the page was taken from the FreeList
FlagNewRoot byte = 2 // the page is a new root page
)
type PageToWrite struct {
PageNo uint32
Data []byte
}
type Writer struct {
metricID uint32
timestamp uint32
value float64
tmp []byte
fileName string
file *os.File
hasher hash.Hash32
isDataPageReused bool
dataPageNo uint32
isRootChanged bool
newRootPageNo uint32
indexPages []uint32
reusedIndexPages []uint32
indexPagesToWrite []PageToWrite
}
type WriterOptions struct {
Dir string
MetricID uint32
Value float64
Timestamp uint32
IsDataPageReused bool
DataPageNo uint32
Page []byte
}
// A data page is written exactly once, so to keep the interface simple it is
// passed via Options. There can be 1 to N index pages, hence a separate method.
func NewWriter(opt WriterOptions) (*Writer, error) {
if opt.Dir == "" {
return nil, errors.New("Dir option is required")
}
if opt.MetricID == 0 {
return nil, errors.New("MetricID option is required")
}
if opt.DataPageNo == 0 {
return nil, errors.New("DataPageNo option is required")
}
// if len(opt.Page) != octopus.DataPageSize {
// return nil, fmt.Errorf("bug: wrong data page size %d", len(opt.Page))
// }
s := &Writer{
fileName: JoinREDOFileName(opt.Dir, opt.MetricID),
metricID: opt.MetricID,
timestamp: opt.Timestamp,
value: opt.Value,
tmp: make([]byte, 21),
isDataPageReused: opt.IsDataPageReused,
dataPageNo: opt.DataPageNo,
hasher: crc32.NewIEEE(),
}
var err error
s.file, err = os.OpenFile(s.fileName, os.O_CREATE|os.O_WRONLY, 0770)
if err != nil {
return nil, err
}
err = s.init(opt.Page)
if err != nil {
return nil, err
}
return s, nil
}
/*
Format:
4b metricID
4b timestamp
8b value
1b flags (reused)
4b dataPageNo
8KB dataPage
*/
func (s *Writer) init(dataPage []byte) error {
bin.PutUint32(s.tmp[0:], s.metricID)
bin.PutUint32(s.tmp[4:], s.timestamp)
bin.PutFloat64(s.tmp[8:], s.value)
if s.isDataPageReused {
s.tmp[16] = 1
}
bin.PutUint32(s.tmp[17:], s.dataPageNo)
_, err := s.file.Write(s.tmp)
if err != nil {
return err
}
_, err = s.file.Write(dataPage)
if err != nil {
return err
}
s.hasher.Write(s.tmp)
s.hasher.Write(dataPage)
return nil
}
/*
Format:
1b index page flags
4b indexPageNo
Nb indexPage
*/
func (s *Writer) AppendIndexPage(indexPageNo uint32, indexPage []byte, flags byte) error {
s.tmp[0] = flags
bin.PutUint32(s.tmp[1:], indexPageNo)
_, err := s.file.Write(s.tmp[:5])
if err != nil {
return err
}
_, err = s.file.Write(indexPage)
if err != nil {
return err
}
s.hasher.Write(s.tmp[:5])
s.hasher.Write(indexPage)
s.indexPages = append(s.indexPages, indexPageNo)
if (flags & FlagReused) == FlagReused {
s.reusedIndexPages = append(s.reusedIndexPages, indexPageNo)
}
if (flags & FlagNewRoot) == FlagNewRoot {
s.newRootPageNo = indexPageNo
s.isRootChanged = true
}
s.indexPagesToWrite = append(s.indexPagesToWrite,
PageToWrite{
PageNo: indexPageNo,
Data: indexPage,
})
return nil
}
func (s *Writer) IndexPagesToWrite() []PageToWrite {
return s.indexPagesToWrite
}
func (s *Writer) Close() (err error) {
// finalize the record with the trailing checksum
bin.PutUint32(s.tmp, s.hasher.Sum32())
_, err = s.file.Write(s.tmp[:4])
if err != nil {
return err
}
err = s.file.Sync()
if err != nil {
return
}
return s.file.Close()
}
type Report struct {
FileName string
IsDataPageReused bool
DataPageNo uint32
IsRootChanged bool
NewRootPageNo uint32
ReusedIndexPages []uint32
}
func (s *Writer) GetReport() Report {
return Report{
FileName: s.fileName,
IsDataPageReused: s.isDataPageReused,
DataPageNo: s.dataPageNo,
//IndexPages: s.indexPages,
IsRootChanged: s.isRootChanged,
NewRootPageNo: s.newRootPageNo,
ReusedIndexPages: s.reusedIndexPages,
}
}
// HELPERS
func JoinREDOFileName(dir string, metricID uint32) string {
return filepath.Join(dir, fmt.Sprintf("m%d.redo", metricID))
}
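// Usage sketch (illustrative only, not part of this file): a REDO file is
// built once per append - the data page goes in via NewWriter, index pages
// are appended, and Close writes the trailing CRC32 and fsyncs.
//
//	w, err := NewWriter(WriterOptions{
//		Dir:        redoDir,
//		MetricID:   metricID,
//		Timestamp:  ts,
//		Value:      value,
//		DataPageNo: dataPageNo,
//		Page:       dataPage,
//	})
//	if err != nil { /* handle */ }
//	if err = w.AppendIndexPage(pageNo, pageData, FlagNewRoot); err != nil { /* handle */ }
//	if err = w.Close(); err != nil { /* handle */ }
//	report := w.GetReport()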

@ -0,0 +1,619 @@
package atree
import (
"fmt"
octopus "gordenko.dev/dima/diploma"
)
type IterateAllCumulativeByTreeCursorReq struct {
FracDigits byte
PageNo uint32
EndTimestamp uint32
EndValue float64
ResponseWriter *CumulativeMeasureWriter
}
func (s *Atree) IterateAllCumulativeByTreeCursor(req IterateAllCumulativeByTreeCursorReq) error {
buf, err := s.fetchDataPage(req.PageNo)
if err != nil {
return err
}
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
PageNo: req.PageNo,
PageData: buf,
Atree: s,
FracDigits: req.FracDigits,
MetricType: octopus.Cumulative,
})
if err != nil {
return err
}
defer treeCursor.Close()
var (
endTimestamp = req.EndTimestamp
endValue = req.EndValue
)
for {
timestamp, value, done, err := treeCursor.Prev()
if err != nil {
return err
}
if done {
err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
Timestamp: endTimestamp,
Value: endValue,
Total: endValue,
})
if err != nil {
return err
}
return nil
}
err = req.ResponseWriter.WriteMeasure(CumulativeMeasure{
Timestamp: endTimestamp,
Value: endValue,
Total: endValue - value,
})
if err != nil {
return err
}
endTimestamp = timestamp
endValue = value
}
}
type ContinueIterateCumulativeByTreeCursorReq struct {
FracDigits byte
Since uint32
Until uint32
LastPageNo uint32
EndTimestamp uint32
EndValue float64
ResponseWriter *CumulativeMeasureWriter
}
func (s *Atree) ContinueIterateCumulativeByTreeCursor(req ContinueIterateCumulativeByTreeCursorReq) error {
buf, err := s.fetchDataPage(req.LastPageNo)
if err != nil {
return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
}
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
PageNo: req.LastPageNo,
PageData: buf,
Atree: s,
FracDigits: req.FracDigits,
MetricType: octopus.Cumulative,
})
if err != nil {
return err
}
defer treeCursor.Close()
var (
endTimestamp = req.EndTimestamp
endValue = req.EndValue
)
for {
timestamp, value, done, err := treeCursor.Prev()
if err != nil {
return err
}
if done {
err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
Timestamp: endTimestamp,
Value: endValue,
Total: endValue,
})
if err != nil {
return err
}
return nil
}
if timestamp <= req.Until {
err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
Timestamp: endTimestamp,
Value: endValue,
Total: endValue - value,
})
if err != nil {
return err
}
if timestamp < req.Since {
return nil
}
} else {
// invariant violated: a continued cursor must not see a timestamp beyond req.Until
panic("continue cumulative but timestamp > req.Until")
}
}
}
type FindAndIterateCumulativeByTreeCursorReq struct {
FracDigits byte
Since uint32
Until uint32
RootPageNo uint32
ResponseWriter *CumulativeMeasureWriter
}
func (s *Atree) FindAndIterateCumulativeByTreeCursor(req FindAndIterateCumulativeByTreeCursorReq) error {
pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
if err != nil {
return err
}
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
PageNo: pageNo,
PageData: buf,
Atree: s,
FracDigits: req.FracDigits,
MetricType: octopus.Cumulative,
})
if err != nil {
return err
}
defer treeCursor.Close()
var (
endTimestamp uint32
endValue float64
)
for {
timestamp, value, done, err := treeCursor.Prev()
if err != nil {
return err
}
if done {
if endTimestamp > 0 {
err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
Timestamp: endTimestamp,
Value: endValue,
Total: endValue,
})
if err != nil {
return err
}
}
return nil
}
if timestamp > req.Until {
continue
}
if endTimestamp > 0 {
err := req.ResponseWriter.WriteMeasure(CumulativeMeasure{
Timestamp: endTimestamp,
Value: endValue,
Total: endValue - value,
})
if err != nil {
return err
}
}
endTimestamp = timestamp
endValue = value
if timestamp < req.Since {
return nil
}
}
}
type IterateAllInstantByTreeCursorReq struct {
FracDigits byte
PageNo uint32
ResponseWriter *InstantMeasureWriter
}
func (s *Atree) IterateAllInstantByTreeCursor(req IterateAllInstantByTreeCursorReq) error {
buf, err := s.fetchDataPage(req.PageNo)
if err != nil {
return err
}
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
PageNo: req.PageNo,
PageData: buf,
Atree: s,
FracDigits: req.FracDigits,
MetricType: octopus.Instant,
})
if err != nil {
return err
}
defer treeCursor.Close()
for {
timestamp, value, done, err := treeCursor.Prev()
if err != nil {
return err
}
if done {
return nil
}
err = req.ResponseWriter.WriteMeasure(InstantMeasure{
Timestamp: timestamp,
Value: value,
})
if err != nil {
return err
}
}
}
type ContinueIterateInstantByTreeCursorReq struct {
FracDigits byte
Since uint32
Until uint32
LastPageNo uint32
ResponseWriter *InstantMeasureWriter
}
func (s *Atree) ContinueIterateInstantByTreeCursor(req ContinueIterateInstantByTreeCursorReq) error {
buf, err := s.fetchDataPage(req.LastPageNo)
if err != nil {
return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
}
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
PageNo: req.LastPageNo,
PageData: buf,
Atree: s,
FracDigits: req.FracDigits,
MetricType: octopus.Instant,
})
if err != nil {
return err
}
defer treeCursor.Close()
for {
timestamp, value, done, err := treeCursor.Prev()
if err != nil {
return err
}
if done {
// no more records
return nil
}
if timestamp > req.Until {
panic("continue instant timestamp > req.Until")
}
if timestamp < req.Since {
return nil
}
err = req.ResponseWriter.WriteMeasure(InstantMeasure{
Timestamp: timestamp,
Value: value,
})
if err != nil {
return err
}
}
}
type FindAndIterateInstantByTreeCursorReq struct {
FracDigits byte
Since uint32
Until uint32
RootPageNo uint32
ResponseWriter *InstantMeasureWriter
}
func (s *Atree) FindAndIterateInstantByTreeCursor(req FindAndIterateInstantByTreeCursorReq) error {
pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
if err != nil {
return err
}
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
PageNo: pageNo,
PageData: buf,
Atree: s,
FracDigits: req.FracDigits,
MetricType: octopus.Instant,
})
if err != nil {
return err
}
defer treeCursor.Close()
for {
timestamp, value, done, err := treeCursor.Prev()
if err != nil {
return err
}
if done {
return nil
}
if timestamp > req.Until {
continue
}
if timestamp < req.Since {
return nil
}
err = req.ResponseWriter.WriteMeasure(InstantMeasure{
Timestamp: timestamp,
Value: value,
})
if err != nil {
return err
}
}
}
type ContinueCollectInstantPeriodsReq struct {
FracDigits byte
Aggregator *InstantAggregator
ResponseWriter *InstantPeriodsWriter
LastPageNo uint32
Since uint32
Until uint32
}
func (s *Atree) ContinueCollectInstantPeriods(req ContinueCollectInstantPeriodsReq) error {
buf, err := s.fetchDataPage(req.LastPageNo)
if err != nil {
return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
}
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
PageNo: req.LastPageNo,
PageData: buf,
Atree: s,
FracDigits: req.FracDigits,
MetricType: octopus.Instant,
})
if err != nil {
return err
}
defer treeCursor.Close()
var period InstantPeriod
for {
timestamp, value, done, err := treeCursor.Prev()
if err != nil {
return err
}
if done || timestamp < req.Since {
isCompleted := req.Aggregator.FillPeriod(timestamp, &period)
if isCompleted {
err := req.ResponseWriter.WritePeriod(period)
if err != nil {
return err
}
}
return nil
}
if timestamp <= req.Until {
isCompleted := req.Aggregator.Feed(timestamp, value, &period)
if isCompleted {
err := req.ResponseWriter.WritePeriod(period)
if err != nil {
return err
}
}
}
}
}
type FindInstantPeriodsReq struct {
FracDigits byte
ResponseWriter *InstantPeriodsWriter
RootPageNo uint32
Since uint32
Until uint32
GroupBy octopus.GroupBy
FirstHourOfDay int
LastDayOfMonth int
}
func (s *Atree) FindInstantPeriods(req FindInstantPeriodsReq) error {
pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
if err != nil {
return err
}
aggregator, err := NewInstantAggregator(InstantAggregatorOptions{
GroupBy: req.GroupBy,
FirstHourOfDay: req.FirstHourOfDay,
LastDayOfMonth: req.LastDayOfMonth,
})
if err != nil {
return err
}
cursor, err := NewBackwardCursor(BackwardCursorOptions{
PageNo: pageNo,
PageData: buf,
Atree: s,
FracDigits: req.FracDigits,
MetricType: octopus.Instant,
})
if err != nil {
return err
}
defer cursor.Close()
var period InstantPeriod
for {
timestamp, value, done, err := cursor.Prev()
if err != nil {
return err
}
if done || timestamp < req.Since {
isCompleted := aggregator.FillPeriod(timestamp, &period)
if isCompleted {
err := req.ResponseWriter.WritePeriod(period)
if err != nil {
return err
}
}
return nil
}
if timestamp <= req.Until {
isCompleted := aggregator.Feed(timestamp, value, &period)
if isCompleted {
err := req.ResponseWriter.WritePeriod(period)
if err != nil {
return err
}
}
}
}
}
type FindCumulativePeriodsReq struct {
FracDigits byte
ResponseWriter *CumulativePeriodsWriter
RootPageNo uint32
Since uint32
Until uint32
GroupBy octopus.GroupBy
FirstHourOfDay int
LastDayOfMonth int
}
func (s *Atree) FindCumulativePeriods(req FindCumulativePeriodsReq) error {
pageNo, buf, err := s.findDataPage(req.RootPageNo, req.Until)
if err != nil {
return err
}
aggregator, err := NewCumulativeAggregator(CumulativeAggregatorOptions{
GroupBy: req.GroupBy,
FirstHourOfDay: req.FirstHourOfDay,
LastDayOfMonth: req.LastDayOfMonth,
})
if err != nil {
return err
}
cursor, err := NewBackwardCursor(BackwardCursorOptions{
PageNo: pageNo,
PageData: buf,
Atree: s,
FracDigits: req.FracDigits,
MetricType: octopus.Cumulative,
})
if err != nil {
return err
}
defer cursor.Close()
var period CumulativePeriod
for {
timestamp, value, done, err := cursor.Prev()
if err != nil {
return err
}
if done || timestamp < req.Since {
isCompleted := aggregator.FillPeriod(timestamp, value, &period)
if isCompleted {
err := req.ResponseWriter.WritePeriod(period)
if err != nil {
return err
}
}
return nil
}
if timestamp <= req.Until {
isCompleted := aggregator.Feed(timestamp, value, &period)
if isCompleted {
err := req.ResponseWriter.WritePeriod(period)
if err != nil {
return err
}
}
}
}
}
type ContinueCollectCumulativePeriodsReq struct {
FracDigits byte
Aggregator *CumulativeAggregator
ResponseWriter *CumulativePeriodsWriter
LastPageNo uint32
Since uint32
Until uint32
}
func (s *Atree) ContinueCollectCumulativePeriods(req ContinueCollectCumulativePeriodsReq) error {
buf, err := s.fetchDataPage(req.LastPageNo)
if err != nil {
return fmt.Errorf("fetchDataPage(%d): %s", req.LastPageNo, err)
}
treeCursor, err := NewBackwardCursor(BackwardCursorOptions{
PageNo: req.LastPageNo,
PageData: buf,
Atree: s,
FracDigits: req.FracDigits,
MetricType: octopus.Cumulative,
})
if err != nil {
return err
}
defer treeCursor.Close()
var period CumulativePeriod
for {
timestamp, value, done, err := treeCursor.Prev()
if err != nil {
return err
}
if done || timestamp < req.Since {
isCompleted := req.Aggregator.FillPeriod(timestamp, value, &period)
if isCompleted {
err := req.ResponseWriter.WritePeriod(period)
if err != nil {
return err
}
}
return nil
}
if timestamp <= req.Until {
isCompleted := req.Aggregator.Feed(timestamp, value, &period)
if isCompleted {
err := req.ResponseWriter.WritePeriod(period)
if err != nil {
return err
}
}
}
}
}

@ -0,0 +1,306 @@
package atree
import (
"bytes"
"fmt"
"io"
octopus "gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/bin"
"gordenko.dev/dima/diploma/proto"
)
// CURRENT VALUE WRITER
type CurrentValue struct {
MetricID uint32
Timestamp uint32
Value float64
}
type CurrentValueWriter struct {
arr []byte
responder *ChunkedResponder
}
func NewCurrentValueWriter(dst io.Writer) *CurrentValueWriter {
return &CurrentValueWriter{
arr: make([]byte, 16),
responder: NewChunkedResponder(dst),
}
}
func (s *CurrentValueWriter) BufferValue(m CurrentValue) {
bin.PutUint32(s.arr[0:], m.MetricID)
bin.PutUint32(s.arr[4:], m.Timestamp)
bin.PutFloat64(s.arr[8:], m.Value)
s.responder.BufferRecord(s.arr)
}
func (s *CurrentValueWriter) Close() error {
return s.responder.Flush()
}
// INSTANT MEASURE WRITER
type InstantMeasure struct {
Timestamp uint32
Value float64
}
type InstantMeasureWriter struct {
arr []byte
responder *ChunkedResponder
}
func NewInstantMeasureWriter(dst io.Writer) *InstantMeasureWriter {
return &InstantMeasureWriter{
arr: make([]byte, 12),
responder: NewChunkedResponder(dst),
}
}
func (s *InstantMeasureWriter) BufferMeasure(m InstantMeasure) {
bin.PutUint32(s.arr[0:], m.Timestamp)
bin.PutFloat64(s.arr[4:], m.Value)
s.responder.BufferRecord(s.arr)
}
func (s *InstantMeasureWriter) WriteMeasure(m InstantMeasure) error {
bin.PutUint32(s.arr[0:], m.Timestamp)
bin.PutFloat64(s.arr[4:], m.Value)
return s.responder.AppendRecord(s.arr)
}
func (s *InstantMeasureWriter) Close() error {
return s.responder.Flush()
}
// CUMULATIVE MEASURE WRITER
type CumulativeMeasure struct {
Timestamp uint32
Value float64
Total float64
}
type CumulativeMeasureWriter struct {
arr []byte
responder *ChunkedResponder
}
func NewCumulativeMeasureWriter(dst io.Writer) *CumulativeMeasureWriter {
return &CumulativeMeasureWriter{
arr: make([]byte, 20),
responder: NewChunkedResponder(dst),
}
}
func (s *CumulativeMeasureWriter) BufferMeasure(m CumulativeMeasure) {
bin.PutUint32(s.arr[0:], m.Timestamp)
bin.PutFloat64(s.arr[4:], m.Value)
bin.PutFloat64(s.arr[12:], m.Total)
s.responder.BufferRecord(s.arr)
}
func (s *CumulativeMeasureWriter) WriteMeasure(m CumulativeMeasure) error {
bin.PutUint32(s.arr[0:], m.Timestamp)
bin.PutFloat64(s.arr[4:], m.Value)
bin.PutFloat64(s.arr[12:], m.Total)
return s.responder.AppendRecord(s.arr)
}
func (s *CumulativeMeasureWriter) Close() error {
return s.responder.Flush()
}
// INSTANT AGGREGATE WRITER
type InstantPeriodsWriter struct {
aggregateFuncs byte
arr []byte
responder *ChunkedResponder
}
func NewInstantPeriodsWriter(dst io.Writer, aggregateFuncs byte) *InstantPeriodsWriter {
var q int
if (aggregateFuncs & octopus.AggregateMin) == octopus.AggregateMin {
q++
}
if (aggregateFuncs & octopus.AggregateMax) == octopus.AggregateMax {
q++
}
if (aggregateFuncs & octopus.AggregateAvg) == octopus.AggregateAvg {
q++
}
return &InstantPeriodsWriter{
aggregateFuncs: aggregateFuncs,
arr: make([]byte, 12+q*8),
responder: NewChunkedResponder(dst),
}
}
type InstantPeriod struct {
Period uint32
Since uint32
Until uint32
Min float64
Max float64
Avg float64
}
func (s *InstantPeriodsWriter) BufferMeasure(p InstantPeriod) {
s.pack(p)
s.responder.BufferRecord(s.arr)
}
func (s *InstantPeriodsWriter) WritePeriod(p InstantPeriod) error {
s.pack(p)
return s.responder.AppendRecord(s.arr)
}
func (s *InstantPeriodsWriter) Close() error {
return s.responder.Flush()
}
func (s *InstantPeriodsWriter) pack(p InstantPeriod) {
bin.PutUint32(s.arr[0:], p.Period)
bin.PutUint32(s.arr[4:], p.Since)
bin.PutUint32(s.arr[8:], p.Until)
pos := 12
if (s.aggregateFuncs & octopus.AggregateMin) == octopus.AggregateMin {
bin.PutFloat64(s.arr[pos:], p.Min)
pos += 8
}
if (s.aggregateFuncs & octopus.AggregateMax) == octopus.AggregateMax {
bin.PutFloat64(s.arr[pos:], p.Max)
pos += 8
}
if (s.aggregateFuncs & octopus.AggregateAvg) == octopus.AggregateAvg {
bin.PutFloat64(s.arr[pos:], p.Avg)
}
}
// CUMULATIVE AGGREGATE WRITER
type CumulativePeriodsWriter struct {
arr []byte
responder *ChunkedResponder
}
func NewCumulativePeriodsWriter(dst io.Writer) *CumulativePeriodsWriter {
return &CumulativePeriodsWriter{
arr: make([]byte, 28),
responder: NewChunkedResponder(dst),
}
}
type CumulativePeriod struct {
Period uint32
Since uint32
Until uint32
EndValue float64
Total float64
}
func (s *CumulativePeriodsWriter) BufferMeasure(p CumulativePeriod) {
s.pack(p)
s.responder.BufferRecord(s.arr)
}
func (s *CumulativePeriodsWriter) WritePeriod(p CumulativePeriod) error {
s.pack(p)
return s.responder.AppendRecord(s.arr)
}
func (s *CumulativePeriodsWriter) Close() error {
return s.responder.Flush()
}
func (s *CumulativePeriodsWriter) pack(p CumulativePeriod) {
bin.PutUint32(s.arr[0:], p.Period)
bin.PutUint32(s.arr[4:], p.Since)
bin.PutUint32(s.arr[8:], p.Until)
bin.PutFloat64(s.arr[12:], p.EndValue)
bin.PutFloat64(s.arr[20:], p.Total)
}
// CHUNKED RESPONDER
var endMsg = []byte{
proto.RespEndOfValue, // end of stream
}
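// ChunkedResponder frames a stream of fixed-size records: each chunk on
// the wire is laid out as
//
//	[RespPartOfValue:1][records qty:4][record 0]...[record N-1]
//
// AppendRecord flushes a chunk once the buffer reaches 1500 bytes, and
// Flush terminates the stream with a single RespEndOfValue byte.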
type ChunkedResponder struct {
recordsQty int
buf *bytes.Buffer
dst io.Writer
}
func NewChunkedResponder(dst io.Writer) *ChunkedResponder {
s := &ChunkedResponder{
recordsQty: 0,
buf: bytes.NewBuffer(nil),
dst: dst,
}
s.buf.Write([]byte{
proto.RespPartOfValue, // message type
0, 0, 0, 0, // records qty
})
return s
}
func (s *ChunkedResponder) BufferRecord(rec []byte) {
s.buf.Write(rec)
s.recordsQty++
}
func (s *ChunkedResponder) AppendRecord(rec []byte) error {
s.buf.Write(rec)
s.recordsQty++
if s.buf.Len() < 1500 {
return nil
}
if err := s.sendBuffered(); err != nil {
return err
}
s.buf.Write([]byte{
proto.RespPartOfValue, // message type
0, 0, 0, 0, // records qty
})
s.recordsQty = 0
return nil
}
func (s *ChunkedResponder) Flush() error {
if s.recordsQty > 0 {
if err := s.sendBuffered(); err != nil {
return err
}
}
if _, err := s.dst.Write(endMsg); err != nil {
return err
}
return nil
}
func (s *ChunkedResponder) sendBuffered() (err error) {
msg := s.buf.Bytes()
bin.PutUint32(msg[1:], uint32(s.recordsQty))
n, err := s.dst.Write(msg)
if err != nil {
return
}
if n != len(msg) {
return fmt.Errorf("incomplete write %d bytes instead of %d", n, len(msg))
}
s.buf.Reset()
return
}

@ -0,0 +1,621 @@
package bin
import (
"errors"
"fmt"
"io"
"math"
)
const (
varInt64SignFlag = 0b01000000
maxReadAttempts = 5
)
var (
ErrNoSpace = errors.New("no space")
ErrIncompleteWrite = errors.New("incomplete write")
ErrReadOverflow = errors.New("bin: reader returned 'n' > bufsize")
// ErrNegativeReadCount indicates a bug in the source reader.
ErrNegativeReadCount = errors.New("bin: reader returned negative 'n'")
)
// READ
func ReadUint16(src io.Reader) (num uint16, err error) {
arr := make([]byte, 2)
// io.ReadFull retries short reads: a bare Read may legally return
// fewer bytes than requested without reporting an error.
if _, err = io.ReadFull(src, arr); err != nil {
return
}
return GetUint16(arr), nil
}
func ReadUint24AsInt(src io.Reader) (num int, err error) {
arr := make([]byte, 3)
if _, err = io.ReadFull(src, arr); err != nil {
return
}
return GetUint24AsInt(arr), nil
}
func ReadUint32(src io.Reader) (num uint32, err error) {
arr := make([]byte, 4)
if _, err = io.ReadFull(src, arr); err != nil {
return
}
return GetUint32(arr), nil
}
func ReadUint64(src io.Reader) (num uint64, err error) {
arr := make([]byte, 8)
if _, err = io.ReadFull(src, arr); err != nil {
return
}
return GetUint64(arr), nil
}
func ReadFloat64(src io.Reader) (num float64, err error) {
arr := make([]byte, 8)
if _, err = io.ReadFull(src, arr); err != nil {
return
}
return GetFloat64(arr), nil
}
func ReadUnixtime(src io.Reader) (num int64, err error) {
arr := make([]byte, 8)
if _, err = io.ReadFull(src, arr); err != nil {
return
}
return int64(GetUint64(arr)), nil
}
// READ VAR
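// Varint convention used throughout this package: a number is emitted
// 7 bits at a time, least significant group first, and the high bit
// marks the FINAL byte (the inverse of the protobuf continuation bit).
// A 9th byte, when needed, carries the top 8 bits verbatim and no flag.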
func ReadVarUint64(src io.Reader) (num uint64, n int, err error) {
p := make([]byte, 1)
for i := range 8 {
// io.ReadFull: a single-byte Read may legally return (0, nil).
if _, err = io.ReadFull(src, p); err != nil {
return
}
n++
b := p[0]
if b >= 128 {
num |= uint64(b&127) << uint(i*7)
return
}
num |= uint64(b) << uint(i*7)
}
if _, err = io.ReadFull(src, p); err != nil {
return
}
n++
num |= uint64(p[0]) << 56
return
}
// GET
func GetUint16(arr []byte) uint16 {
return uint16(arr[0]) | (uint16(arr[1]) << 8)
}
func GetUint16AsInt(arr []byte) int {
return int(arr[0]) | (int(arr[1]) << 8)
}
func GetUint24AsInt(arr []byte) int {
return int(arr[0]) | (int(arr[1]) << 8) | (int(arr[2]) << 16)
}
func GetUint32(arr []byte) uint32 {
return uint32(arr[0]) | (uint32(arr[1]) << 8) |
(uint32(arr[2]) << 16) | (uint32(arr[3]) << 24)
}
func GetUint32AsInt64(arr []byte) int64 {
u32 := uint32(arr[0]) | (uint32(arr[1]) << 8) |
(uint32(arr[2]) << 16) | (uint32(arr[3]) << 24)
return int64(u32)
}
func GetUint40(arr []byte) uint64 {
return uint64(arr[0]) | (uint64(arr[1]) << 8) |
(uint64(arr[2]) << 16) | (uint64(arr[3]) << 24) |
(uint64(arr[4]) << 32)
}
func GetUint48(arr []byte) uint64 {
return uint64(arr[0]) | (uint64(arr[1]) << 8) |
(uint64(arr[2]) << 16) | (uint64(arr[3]) << 24) |
(uint64(arr[4]) << 32) | (uint64(arr[5]) << 40)
}
func GetUint64(arr []byte) uint64 {
return uint64(arr[0]) | (uint64(arr[1]) << 8) |
(uint64(arr[2]) << 16) | (uint64(arr[3]) << 24) |
(uint64(arr[4]) << 32) | (uint64(arr[5]) << 40) |
(uint64(arr[6]) << 48) | (uint64(arr[7]) << 56)
}
func GetFloat32(arr []byte) float32 {
return math.Float32frombits(GetUint32(arr))
}
func GetFloat64(arr []byte) float64 {
return math.Float64frombits(GetUint64(arr))
}
func GetUnixtime(arr []byte) int64 {
u32 := uint32(arr[0]) | (uint32(arr[1]) << 8) |
(uint32(arr[2]) << 16) | (uint32(arr[3]) << 24)
return int64(u32)
}
func GetVarUint64(arr []byte) (num uint64, n int, err error) {
var b byte
for i := range 8 {
if i >= len(arr) {
return 0, 0, io.EOF
}
b = arr[i]
if b >= 128 {
num |= uint64(b&127) << uint(i*7)
return num, i + 1, nil
}
num |= uint64(b) << uint(i*7)
}
if len(arr) < 9 {
return 0, 0, io.EOF
}
return num | uint64(arr[8])<<56, 9, nil
}
func ReverseGetVarUint64(arr []byte) (num uint64, n int, err error) {
var (
b byte
j = len(arr) - 1
)
for i := range 8 {
if j < 0 {
return 0, 0, io.EOF
}
b = arr[j]
if b >= 128 {
num |= uint64(b&127) << uint(i*7)
return num, i + 1, nil
}
num |= uint64(b) << uint(i*7)
j--
}
if j < 0 {
return 0, 0, io.EOF
}
return num | uint64(arr[j])<<56, 9, nil
}
func GetVarInt64(arr []byte) (num int64, n int, err error) {
u64, n, err := GetVarUint64(arr)
if err != nil {
return
}
return DecodeZigZag(u64), n, nil
}
func ReverseGetVarInt64(arr []byte) (num int64, n int, err error) {
u64, n, err := ReverseGetVarUint64(arr)
if err != nil {
return
}
return DecodeZigZag(u64), n, nil
}
// PUT
func PutUint16(arr []byte, num uint16) {
arr[0] = byte(num)
arr[1] = byte(num >> 8)
}
func PutIntAsUint16(arr []byte, num int) {
arr[0] = byte(num)
arr[1] = byte(num >> 8)
}
func PutIntAsUint24(arr []byte, num int) {
arr[0] = byte(num)
arr[1] = byte(num >> 8)
arr[2] = byte(num >> 16)
}
func PutUint32(arr []byte, num uint32) {
arr[0] = byte(num)
arr[1] = byte(num >> 8)
arr[2] = byte(num >> 16)
arr[3] = byte(num >> 24)
}
func PutInt64AsUint32(arr []byte, num int64) {
arr[0] = byte(num)
arr[1] = byte(num >> 8)
arr[2] = byte(num >> 16)
arr[3] = byte(num >> 24)
}
func PutUint40(arr []byte, num uint64) {
arr[0] = byte(num)
arr[1] = byte(num >> 8)
arr[2] = byte(num >> 16)
arr[3] = byte(num >> 24)
arr[4] = byte(num >> 32)
}
func PutUint48(arr []byte, num uint64) {
arr[0] = byte(num)
arr[1] = byte(num >> 8)
arr[2] = byte(num >> 16)
arr[3] = byte(num >> 24)
arr[4] = byte(num >> 32)
arr[5] = byte(num >> 40)
}
func PutUint64(arr []byte, num uint64) {
arr[0] = byte(num)
arr[1] = byte(num >> 8)
arr[2] = byte(num >> 16)
arr[3] = byte(num >> 24)
arr[4] = byte(num >> 32)
arr[5] = byte(num >> 40)
arr[6] = byte(num >> 48)
arr[7] = byte(num >> 56)
}
func PutFloat32(arr []byte, num float32) {
PutUint32(arr, math.Float32bits(num))
}
func PutFloat64(arr []byte, num float64) {
PutUint64(arr, math.Float64bits(num))
}
// WRITE
func WriteUint16(dst io.Writer, num uint16) error {
arr := []byte{
byte(num),
byte(num >> 8),
}
n, err := dst.Write(arr)
if err != nil {
return err
}
if n != 2 {
return ErrIncompleteWrite
}
return err
}
func WriteUint32(dst io.Writer, num uint32) error {
arr := []byte{
byte(num),
byte(num >> 8),
byte(num >> 16),
byte(num >> 24),
}
n, err := dst.Write(arr)
if err != nil {
return err
}
if n != 4 {
return ErrIncompleteWrite
}
return err
}
func WriteFloat64(dst io.Writer, num float64) error {
arr := make([]byte, 8)
PutUint64(arr, math.Float64bits(num))
n, err := dst.Write(arr)
if err != nil {
return err
}
if n != 8 {
return ErrIncompleteWrite
}
return err
}
// WRITE VAR
func WriteVarUint64(dst io.Writer, num uint64) (int, error) {
arr := make([]byte, 9)
for i := range 8 {
arr[i] = byte(num & 127)
num >>= 7
if num == 0 {
arr[i] |= 128
size := i + 1
n, err := dst.Write(arr[:size])
if err != nil {
return n, err
}
if n != size {
return n, ErrIncompleteWrite
}
return size, nil
}
}
arr[8] = byte(num)
n, err := dst.Write(arr)
if err != nil {
return n, err
}
if n != 9 {
return n, ErrIncompleteWrite
}
return 9, nil
}
func PutVarUint64(arr []byte, num uint64) (int, error) {
for i := range 8 {
if i >= len(arr) {
return 0, ErrNoSpace
}
arr[i] = byte(num & 127)
num >>= 7
if num == 0 {
arr[i] |= 128
return i + 1, nil
}
}
if len(arr) < 9 {
return 0, ErrNoSpace
}
arr[8] = byte(num)
return 9, nil
}
func ReversePutVarUint64(arr []byte, num uint64) (int, error) {
var tmp [9]byte
for i := range 8 {
tmp[i] = byte(num & 127)
num >>= 7
if num == 0 {
tmp[i] |= 128
n := i + 1
if len(arr) < n {
return 0, ErrNoSpace
}
for j := i; j >= 0; j-- {
arr[i-j] = tmp[j]
}
return n, nil
}
}
tmp[8] = byte(num)
n := 9
if len(arr) < n {
return 0, ErrNoSpace
}
for j := 8; j >= 0; j-- {
arr[8-j] = tmp[j]
}
return n, nil
}
func PutVarUint64AtEnd(arr []byte, num uint64) (int, error) {
var (
tmp [9]byte
n int
)
for i := range 8 {
tmp[i] = byte(num & 127)
num >>= 7
if num == 0 {
tmp[i] |= 128
n = i + 1
break
}
}
if n == 0 {
tmp[8] = byte(num)
n = 9
}
if len(arr) < n {
return 0, ErrNoSpace
}
j := len(arr) - n
for i := range n {
arr[j] = tmp[i]
j++
}
return n, nil
}
func PutVarInt64(arr []byte, x int64) (int, error) {
return PutVarUint64(arr, EncodeZigZag(x))
}
func PutVarInt64AtEnd(arr []byte, x int64) (int, error) {
return PutVarUint64AtEnd(arr, EncodeZigZag(x))
}
func ReversePutVarInt64(arr []byte, x int64) (int, error) {
return ReversePutVarUint64(arr, EncodeZigZag(x))
}
type KeyComparator interface {
CompareTo(int) int
}
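// BinarySearch looks up a key over qty elements addressed by index.
// keyComparator.CompareTo(i) must return 1 when the key is greater than
// element i, -1 when it is smaller, and 0 on a match. When the key is
// absent, the returned index is the position where it would be inserted.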
func BinarySearch(qty int, keyComparator KeyComparator) (elemIdx int, isFound bool) {
if qty == 0 {
return
}
a := 0
b := qty - 1
for {
var (
elemIdx = (b-a)/2 + a
code = keyComparator.CompareTo(elemIdx)
)
if code == 1 {
a = elemIdx + 1
if a > b {
return elemIdx + 1, false
}
} else if code == -1 {
b = elemIdx - 1
if b < a {
return elemIdx, false
}
} else {
return elemIdx, true
}
}
}
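// DeleteReverseArrElem removes element idx from an array whose elements
// are packed back to front (element 0 occupies the last elemSize bytes
// of arr). Elements above idx are shifted one slot toward the end of
// arr, and the vacated slot at the lowest address is zeroed.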
func DeleteReverseArrElem(arr []byte, qty int, elemSize int, idx int) {
dstIdx := len(arr) - idx*elemSize - 1
srcIdx := dstIdx - elemSize
end := len(arr) - qty*elemSize
for ; srcIdx >= end; srcIdx-- {
arr[dstIdx] = arr[srcIdx]
dstIdx--
}
for i := end; i < end+elemSize; i++ {
arr[i] = 0
}
}
// ZigZag
// ZigZag encoding: int64 -> uint64
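// Examples: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, so small
// magnitudes of either sign encode into few varint bytes.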
func EncodeZigZag(x int64) uint64 {
return uint64(x<<1) ^ uint64(x>>63)
}
// ZigZag decoding: uint64 -> int64
func DecodeZigZag(u uint64) int64 {
return int64(u>>1) ^ -(int64(u & 1))
}
func ReadN(r io.Reader, n int) (_ []byte, err error) {
if n < 0 {
err = fmt.Errorf("wrong n=%d", n)
return
}
buf := make([]byte, n)
err = ReadNInto(r, buf)
if err != nil {
return
}
return buf, nil
}
func ReadNInto(r io.Reader, buf []byte) (err error) {
if len(buf) == 0 {
return
}
var q, total, readAttempts int
for readAttempts < maxReadAttempts {
bufsize := len(buf) - total
q, err = r.Read(buf[total:])
if q == bufsize {
return nil
}
if err != nil {
return
}
if q > bufsize {
err = ErrReadOverflow
return
}
if q < 0 {
err = ErrNegativeReadCount
return
}
if q == 0 {
readAttempts++
} else {
total += q
}
}
err = io.ErrNoProgress
return
}
func CalcVarUint64Length(num uint64) int {
for i := range 8 {
num >>= 7
if num == 0 {
return i + 1
}
}
return 9
}
func CalcVarInt64Length(num int64) int {
u64 := EncodeZigZag(num)
for i := range 8 {
u64 >>= 7
if u64 == 0 {
return i + 1
}
}
return 9
}

@ -0,0 +1,138 @@
package bufreader
import (
"errors"
"io"
)
const (
maxReadAttempts = 5
defaultBufSize = 1024
)
var (
// ErrReadOverflow indicates a bug in the source reader.
ErrReadOverflow = errors.New("bufreader: reader returned 'n' > bufsize")
// ErrNegativeReadCount indicates a bug in the source reader.
ErrNegativeReadCount = errors.New("bufreader: reader returned negative 'n'")
)
type BufferedReader struct {
r io.Reader
buf []byte
idx int
end int
totalRead int
}
func New(r io.Reader, bufsize int) *BufferedReader {
if bufsize == 0 {
bufsize = defaultBufSize
}
return &BufferedReader{
r: r,
buf: make([]byte, bufsize),
}
}
func (s *BufferedReader) safeRead(buf []byte) (n int, err error) {
readAttempts := 0
for readAttempts < maxReadAttempts {
n, err = s.r.Read(buf)
if n > 0 {
if n > len(buf) {
return 0, ErrReadOverflow
}
if err == io.EOF {
err = nil
}
return
}
if n < 0 {
return 0, ErrNegativeReadCount
}
// n == 0
if err != nil {
return
}
readAttempts++
}
return 0, io.ErrNoProgress
}
func (s *BufferedReader) fill() error {
n, err := s.safeRead(s.buf)
s.idx = 0
s.end = n
return err
}
func (s *BufferedReader) ReadByte() (b byte, err error) {
if s.idx == s.end {
if err = s.fill(); err != nil {
return
}
}
b = s.buf[s.idx]
s.idx++
s.totalRead++
return
}
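// Read fills buf completely, looping over the underlying reader as
// needed; unlike a plain io.Reader it returns n < len(buf) only
// together with an error (io.ReadFull semantics).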
func (s *BufferedReader) Read(buf []byte) (int, error) {
size := len(buf)
buffered := s.end - s.idx
if size <= buffered {
copy(buf, s.buf[s.idx:s.idx+size])
s.idx += size
s.totalRead += size
return size, nil
}
copy(buf, s.buf[s.idx:s.end])
s.idx = 0
s.end = 0
n := buffered
rbuf := buf[buffered:]
var (
q int
err error
)
for n < size {
q, err = s.safeRead(rbuf)
n += q
rbuf = rbuf[q:]
if err != nil {
if err == io.EOF && n == size {
s.totalRead += len(buf)
return n, nil
}
break
}
}
s.totalRead += len(buf[:n])
return n, err
}
func (s *BufferedReader) ReadN(size int) ([]byte, error) {
buf := make([]byte, size)
_, err := s.Read(buf)
if err != nil {
return nil, err
}
return buf, nil
}
func (s *BufferedReader) TotalRead() int {
return s.totalRead
}

@ -0,0 +1,3 @@
package chunkenc
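// eps compensates for floating-point rounding when a value scaled by
// 10^fracDigits is truncated to an integer in the compressors.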
const eps = 0.000001

@ -0,0 +1,330 @@
package chunkenc
import (
"fmt"
"math"
"gordenko.dev/dima/diploma/bin"
"gordenko.dev/dima/diploma/conbuf"
)
// REVERSE
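// ReverseCumulativeDeltaCompressor encodes a chunk as a varint first
// value followed by one varint delta (value - firstValue, scaled by
// 10^fracDigits) per appended measure. Delta varints are byte-reversed
// so the chunk can be decoded from its end backwards, newest value
// first. Runs of equal deltas are run-length encoded: run lengths
// 2..129 cost one extra byte, lengths up to 32768 two bytes, after
// which a new delta entry is started. The chunk always ends with two
// bookkeeping bytes: a bitmap (s8) flagging which deltas of the current
// group of up to eight carry a run length, and a counter (s8q) of
// deltas in that group.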
type ReverseCumulativeDeltaCompressor struct {
buf *conbuf.ContinuousBuffer
coef float64
pos int
firstValue float64
lastDelta uint64
length uint16
numIdx int
}
func NewReverseCumulativeDeltaCompressor(buf *conbuf.ContinuousBuffer, size int, fracDigits byte) *ReverseCumulativeDeltaCompressor {
var coef float64 = 1
if fracDigits > 0 {
coef = math.Pow(10, float64(fracDigits))
}
s := &ReverseCumulativeDeltaCompressor{
buf: buf,
pos: size,
coef: coef,
}
if size > 0 {
s.restoreState()
}
return s
}
func (s *ReverseCumulativeDeltaCompressor) restoreState() {
u64, n, err := s.buf.GetVarUint64(0)
if err != nil {
panic(fmt.Sprintf("bug: get first value: %s", err))
}
s.firstValue = float64(u64) / s.coef
if s.pos > n {
pos := s.pos - 1
idxOf8 := uint(8 - s.buf.GetByte(pos))
pos--
s8 := s.buf.GetByte(pos)
pos--
var n int
s.lastDelta, n, err = s.buf.ReverseGetVarUint64(pos)
if err != nil {
panic(fmt.Sprintf("bug: get last delta: %s", err))
}
pos -= n
s.numIdx = pos + 1
var flag byte = 1 << idxOf8
if (s8 & flag) == flag {
s.length, _ = s.buf.DecodeRunLength(pos)
}
}
}
func (s *ReverseCumulativeDeltaCompressor) Size() int {
return s.pos
}
func (s *ReverseCumulativeDeltaCompressor) CalcRequiredSpace(value float64) int {
if s.pos == 0 {
n := bin.CalcVarUint64Length(uint64(value * s.coef))
return n + 3
}
delta := uint64((value-s.firstValue)*s.coef + eps)
if delta == s.lastDelta {
if s.length == 0 {
return 1
}
newLength := s.length + 1
switch {
case newLength < 130:
return 0
case newLength == 130:
return 1
case newLength < 32769:
return 0
}
// the run length would overflow; a new delta entry is started
}
n := bin.CalcVarUint64Length(delta)
n += 2
s8q := s.buf.GetByte(s.pos - 1)
if s8q == 8 {
n -= 1
} else {
n -= 2
}
return n
}
func (s *ReverseCumulativeDeltaCompressor) Append(value float64) {
if s.pos == 0 {
n := s.buf.PutVarUint64(s.pos, uint64(value*s.coef))
s.pos += n
s.firstValue = value
s.encodeNewDelta(0, 0, 1)
} else {
delta := uint64((value-s.firstValue)*s.coef + eps)
if delta == s.lastDelta {
if s.length == 0 {
s.length = 2
s.shiftOnePosToRight()
s.buf.SetByte(s.numIdx-1, 0)
s8q := s.buf.GetByte(s.pos - 1)
s.buf.SetFlag(s.pos-2, 1<<(8-s8q))
} else {
s.length++
if s.length < 130 {
s.buf.SetByte(s.numIdx-1, byte(s.length-2))
} else if s.length == 130 {
s.shiftOnePosToRight()
s.encode2bLength()
} else if s.length < 32769 {
s.encode2bLength()
} else {
s.appendNewDelta(delta)
}
}
} else {
s.appendNewDelta(delta)
}
}
}
func (s *ReverseCumulativeDeltaCompressor) appendNewDelta(delta uint64) {
s.length = 0
s8 := s.buf.GetByte(s.pos - 2)
s8q := s.buf.GetByte(s.pos - 1)
if s8q == 8 {
s.pos -= 1
s8 = 0
s8q = 1
} else {
s.pos -= 2
s8q++
}
s.encodeNewDelta(delta, s8, s8q)
}
func (s *ReverseCumulativeDeltaCompressor) encodeNewDelta(delta uint64, s8 byte, s8q byte) {
s.lastDelta = delta
s.numIdx = s.pos
n := s.buf.ReversePutVarUint64(s.pos, s.lastDelta)
s.pos += n
s.buf.SetByte(s.pos, s8)
s.pos++
s.buf.SetByte(s.pos, s8q)
s.pos++
}
func (s *ReverseCumulativeDeltaCompressor) shiftOnePosToRight() {
s.buf.ShiftOnePosToRight(s.numIdx, s.pos)
s.pos++
s.numIdx++
}
func (s *ReverseCumulativeDeltaCompressor) encode2bLength() {
num := s.length - 2
s.buf.SetByte(s.numIdx-1, byte(num&127)|128)
s.buf.SetByte(s.numIdx-2, byte(num>>7))
}
func (s *ReverseCumulativeDeltaCompressor) DeleteLast() {
var (
s8q = s.buf.GetByte(s.pos - 1)
s8 = s.buf.GetByte(s.pos - 2)
flag byte = 1 << uint(8-s8q)
)
if s.length > 0 {
if s.length == 2 {
s.length = 0
s.buf.UnsetFlag(s.pos-2, flag)
s.buf.ShiftOnePosToLeft(s.numIdx, s.pos)
s.numIdx--
s.pos--
} else if s.length < 130 {
s.length--
s.buf.SetByte(s.numIdx-1, byte(s.length)-2)
} else if s.length == 130 {
s.length--
s.buf.ShiftOnePosToLeft(s.numIdx, s.pos)
s.numIdx--
s.pos--
s.buf.SetByte(s.numIdx-1, byte(s.length)-2)
} else {
s.length--
s.encode2bLength()
}
} else {
if s8q > 1 {
s8q--
flag = 1 << uint(8-s8q)
s.pos = s.numIdx + 2
s.buf.SetByte(s.pos-2, s8)
s.buf.SetByte(s.pos-1, s8q)
} else {
s.pos = s.numIdx + 1
s.buf.SetByte(s.pos-1, 8)
s8 = s.buf.GetByte(s.pos - 2)
flag = 1
}
var (
pos = s.pos - 3
n int
err error
)
s.lastDelta, n, err = s.buf.ReverseGetVarUint64(pos)
if err != nil {
panic(err)
}
s.numIdx = pos - n
if (s8 & flag) == flag {
s.length, _ = s.buf.DecodeRunLength(s.numIdx - 1)
}
}
}
type ReverseCumulativeDeltaDecompressor struct {
buf *conbuf.ContinuousBuffer
pos int
bound int
firstValue float64
lastValue float64
length uint16
coef float64
idxOf8 uint
s8 byte
step byte
}
func NewReverseCumulativeDeltaDecompressor(buf *conbuf.ContinuousBuffer, size int, fracDigits byte) *ReverseCumulativeDeltaDecompressor {
var coef float64 = 1
if fracDigits > 0 {
coef = math.Pow(10, float64(fracDigits))
}
return &ReverseCumulativeDeltaDecompressor{
buf: buf,
coef: coef,
pos: size,
}
}
func (s *ReverseCumulativeDeltaDecompressor) NextValue() (value float64, done bool) {
if s.step > 0 {
if s.length > 0 {
s.length--
return s.lastValue, false
}
if s.pos < s.bound {
return 0, true
}
if s.idxOf8 == 0 {
s.s8 = s.buf.GetByte(s.pos)
s.pos--
}
s.readVar()
if s.length > 0 {
s.length--
}
return s.lastValue, false
}
u64, n, err := s.buf.GetVarUint64(0)
if err != nil {
panic(err)
}
s.firstValue = float64(u64) / s.coef
s.bound = n
s.pos--
s.idxOf8 = uint(8 - s.buf.GetByte(s.pos))
s.pos--
s.s8 = s.buf.GetByte(s.pos)
s.pos--
s.readVar()
if s.length > 0 {
s.length--
}
s.step = 1
return s.lastValue, false
}
func (s *ReverseCumulativeDeltaDecompressor) readVar() {
u64, n, err := s.buf.ReverseGetVarUint64(s.pos)
if err != nil {
panic(err)
}
s.pos -= n
s.lastValue = s.firstValue + float64(u64)/s.coef
var flag byte = 1 << s.idxOf8
if (s.s8 & flag) == flag {
s.length, n = s.buf.DecodeRunLength(s.pos)
s.pos -= n
}
if s.idxOf8 == 7 {
s.idxOf8 = 0
} else {
s.idxOf8++
}
}

@ -0,0 +1,345 @@
package chunkenc
import (
"fmt"
"math"
"gordenko.dev/dima/diploma/bin"
"gordenko.dev/dima/diploma/conbuf"
)
// REVERSE
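// ReverseInstantDeltaCompressor uses the same chunk layout as
// ReverseCumulativeDeltaCompressor (see cumdelta.go), except that
// deltas from the first value may be negative and are therefore stored
// as zig-zag varints.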
type ReverseInstantDeltaCompressor struct {
buf *conbuf.ContinuousBuffer
coef float64
pos int
firstValue float64
lastDelta int64
length uint16
numIdx int
}
func NewReverseInstantDeltaCompressor(buf *conbuf.ContinuousBuffer, size int, fracDigits byte) *ReverseInstantDeltaCompressor {
var coef float64 = 1
if fracDigits > 0 {
coef = math.Pow(10, float64(fracDigits))
}
s := &ReverseInstantDeltaCompressor{
buf: buf,
pos: size,
coef: coef,
}
if size > 0 {
s.restoreState()
}
return s
}
func (s *ReverseInstantDeltaCompressor) restoreState() {
i64, n, err := s.buf.GetVarInt64(0)
if err != nil {
panic(fmt.Sprintf("bug: get first value: %s", err))
}
s.firstValue = float64(i64) / s.coef
if s.pos > n {
pos := s.pos - 1
idxOf8 := uint(8 - s.buf.GetByte(pos))
pos--
s8 := s.buf.GetByte(pos)
pos--
var n int
s.lastDelta, n, err = s.buf.ReverseGetVarInt64(pos)
if err != nil {
panic(fmt.Sprintf("bug: get last delta: %s", err))
}
pos -= n
s.numIdx = pos + 1
var flag byte = 1 << idxOf8
if (s8 & flag) == flag {
s.length, _ = s.buf.DecodeRunLength(pos)
}
}
}
func (s *ReverseInstantDeltaCompressor) Size() int {
return s.pos
}
func (s *ReverseInstantDeltaCompressor) CalcRequiredSpace(value float64) int {
if s.pos == 0 {
n := bin.CalcVarInt64Length(int64(value * s.coef))
return n + 3
}
tmp := (value - s.firstValue) * s.coef
if tmp > 0 {
tmp += eps
} else {
tmp -= eps
}
delta := int64(tmp)
if delta == s.lastDelta {
if s.length == 0 {
return 1
}
newLength := s.length + 1
switch {
case newLength < 130:
return 0
case newLength == 130:
return 1
case newLength < 32769:
return 0
}
// the run length would overflow; a new delta entry is started
}
n := bin.CalcVarInt64Length(delta)
n += 2
s8q := s.buf.GetByte(s.pos - 1)
if s8q == 8 {
n -= 1
} else {
n -= 2
}
return n
}
// The base value is encoded at the start of the buffer.
func (s *ReverseInstantDeltaCompressor) Append(value float64) {
if s.pos == 0 {
n := s.buf.PutVarInt64(s.pos, int64(value*s.coef))
s.pos += n
s.firstValue = value
s.encodeNewDelta(0, 0, 1)
} else {
tmp := (value - s.firstValue) * s.coef
if tmp > 0 {
tmp += eps
} else {
tmp -= eps
}
delta := int64(tmp)
if delta == s.lastDelta {
if s.length == 0 {
s.length = 2
s.shiftOnePosToRight()
s.buf.SetByte(s.numIdx-1, 0)
s8q := s.buf.GetByte(s.pos - 1)
s.buf.SetFlag(s.pos-2, 1<<(8-s8q))
} else {
s.length++
if s.length < 130 {
s.buf.SetByte(s.numIdx-1, byte(s.length-2))
} else if s.length == 130 {
s.shiftOnePosToRight()
s.encode2bLength()
} else if s.length < 32769 {
s.encode2bLength()
} else {
s.appendNewDelta(delta)
}
}
} else {
s.appendNewDelta(delta)
}
}
}
func (s *ReverseInstantDeltaCompressor) appendNewDelta(delta int64) {
s.length = 0
s8 := s.buf.GetByte(s.pos - 2)
s8q := s.buf.GetByte(s.pos - 1)
if s8q == 8 {
s.pos -= 1
s8 = 0
s8q = 1
} else {
s.pos -= 2
s8q++
}
s.encodeNewDelta(delta, s8, s8q)
}
func (s *ReverseInstantDeltaCompressor) encodeNewDelta(delta int64, s8 byte, s8q byte) {
s.lastDelta = delta
s.numIdx = s.pos
n := s.buf.ReversePutVarInt64(s.pos, s.lastDelta)
s.pos += n
s.buf.SetByte(s.pos, s8)
s.pos++
s.buf.SetByte(s.pos, s8q)
s.pos++
}
func (s *ReverseInstantDeltaCompressor) shiftOnePosToRight() {
s.buf.ShiftOnePosToRight(s.numIdx, s.pos)
s.pos++
s.numIdx++
}
func (s *ReverseInstantDeltaCompressor) encode2bLength() {
num := s.length - 2
s.buf.SetByte(s.numIdx-1, byte(num&127)|128)
s.buf.SetByte(s.numIdx-2, byte(num>>7))
}
func (s *ReverseInstantDeltaCompressor) DeleteLast() {
var (
s8q = s.buf.GetByte(s.pos - 1)
s8 = s.buf.GetByte(s.pos - 2)
flag byte = 1 << uint(8-s8q)
)
if s.length > 0 {
if s.length == 2 {
s.length = 0
s.buf.UnsetFlag(s.pos-2, flag)
s.buf.ShiftOnePosToLeft(s.numIdx, s.pos)
s.numIdx--
s.pos--
} else if s.length < 130 {
s.length--
s.buf.SetByte(s.numIdx-1, byte(s.length)-2)
} else if s.length == 130 {
s.length--
s.buf.ShiftOnePosToLeft(s.numIdx, s.pos)
s.numIdx--
s.pos--
s.buf.SetByte(s.numIdx-1, byte(s.length)-2)
} else {
s.length--
s.encode2bLength()
}
} else {
if s8q > 1 {
s8q--
flag = 1 << uint(8-s8q)
s.pos = s.numIdx + 2
s.buf.SetByte(s.pos-2, s8)
s.buf.SetByte(s.pos-1, s8q)
} else {
s.pos = s.numIdx + 1
s.buf.SetByte(s.pos-1, 8)
s8 = s.buf.GetByte(s.pos - 2)
flag = 1
}
var (
pos = s.pos - 3
n int
err error
)
s.lastDelta, n, err = s.buf.ReverseGetVarInt64(pos)
if err != nil {
panic(err)
}
s.numIdx = pos - n
if (s8 & flag) == flag {
s.length, _ = s.buf.DecodeRunLength(s.numIdx - 1)
}
}
}
type ReverseInstantDeltaDecompressor struct {
step byte
buf *conbuf.ContinuousBuffer
pos int
bound int
firstValue float64
lastValue float64
length uint16
coef float64
idxOf8 uint
s8 byte
}
func NewReverseInstantDeltaDecompressor(buf *conbuf.ContinuousBuffer, size int, fracDigits byte) *ReverseInstantDeltaDecompressor {
var coef float64 = 1
if fracDigits > 0 {
coef = math.Pow(10, float64(fracDigits))
}
return &ReverseInstantDeltaDecompressor{
buf: buf,
coef: coef,
pos: size,
}
}
func (s *ReverseInstantDeltaDecompressor) NextValue() (value float64, done bool) {
if s.step > 0 {
if s.length > 0 {
s.length--
return s.lastValue, false
}
if s.pos < s.bound {
return 0, true
}
if s.idxOf8 == 0 {
s.s8 = s.buf.GetByte(s.pos)
s.pos--
}
s.readVar()
if s.length > 0 {
s.length--
}
return s.lastValue, false
}
i64, n, err := s.buf.GetVarInt64(0)
if err != nil {
panic(err)
}
s.firstValue = float64(i64) / s.coef
s.bound = n
s.pos--
s.idxOf8 = uint(8 - s.buf.GetByte(s.pos))
s.pos--
s.s8 = s.buf.GetByte(s.pos)
s.pos--
s.readVar()
if s.length > 0 {
s.length--
}
s.step = 1
return s.lastValue, false
}
func (s *ReverseInstantDeltaDecompressor) readVar() {
i64, n, err := s.buf.ReverseGetVarInt64(s.pos)
if err != nil {
panic(err)
}
s.pos -= n
s.lastValue = s.firstValue + float64(i64)/s.coef
var flag byte = 1 << s.idxOf8
if (s.s8 & flag) == flag {
s.length, n = s.buf.DecodeRunLength(s.pos)
s.pos -= n
}
if s.idxOf8 == 7 {
s.idxOf8 = 0
} else {
s.idxOf8++
}
}

@ -0,0 +1,374 @@
package chunkenc
import (
"fmt"
"gordenko.dev/dima/diploma/bin"
"gordenko.dev/dima/diploma/conbuf"
)
// REVERSE
const (
lastUnixtimeIdx = 0
baseDeltaIdx = 4
)
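// ReverseTimeDeltaOfDeltaCompressor encodes timestamps as
// delta-of-deltas: the newest unixtime is kept uncompressed in the
// first four bytes (rewritten on every append), followed by a varint
// base delta and zig-zag varint deltas-of-delta, run-length encoded
// and byte-reversed exactly like the value chunks in this package.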
type ReverseTimeDeltaOfDeltaCompressor struct {
buf *conbuf.ContinuousBuffer
pos int
baseDelta uint32
lastUnixtime uint32
lastDeltaOfDelta int64
length uint16
numIdx int
}
func NewReverseTimeDeltaOfDeltaCompressor(buf *conbuf.ContinuousBuffer, size int) *ReverseTimeDeltaOfDeltaCompressor {
s := &ReverseTimeDeltaOfDeltaCompressor{
buf: buf,
pos: size,
}
if size > 0 {
s.restoreState()
}
return s
}
func (s *ReverseTimeDeltaOfDeltaCompressor) restoreState() {
s.lastUnixtime = s.buf.GetUint32(lastUnixtimeIdx)
if s.pos > 4 {
u64, _, err := s.buf.GetVarUint64(baseDeltaIdx)
if err != nil {
panic(fmt.Sprintf("bug: get base delta: %s", err))
}
s.baseDelta = uint32(u64)
pos := s.pos - 1
idxOf8 := uint(8 - s.buf.GetByte(pos))
pos--
s8 := s.buf.GetByte(pos)
pos--
var n int
s.lastDeltaOfDelta, n, err = s.buf.ReverseGetVarInt64(pos)
if err != nil {
panic(err)
}
pos -= n
s.numIdx = pos + 1
var flag byte = 1 << idxOf8
if (s8 & flag) == flag {
s.length, _ = s.buf.DecodeRunLength(pos)
}
}
}
func (s *ReverseTimeDeltaOfDeltaCompressor) Size() int {
return s.pos
}
func (s *ReverseTimeDeltaOfDeltaCompressor) CalcRequiredSpace(unixtime uint32) int {
if s.pos == 0 {
return 4
}
if s.baseDelta == 0 {
baseDelta := unixtime - s.lastUnixtime
n := bin.CalcVarUint64Length(uint64(baseDelta))
return n + 3
}
deltaOfDelta := int64(unixtime-s.lastUnixtime) - int64(s.baseDelta)
if deltaOfDelta == s.lastDeltaOfDelta {
if s.length == 0 {
return 1
}
newLength := s.length + 1
switch {
case newLength < 130:
return 0
case newLength == 130:
return 1
case newLength < 32769:
return 0
}
// the run length would overflow; a new delta-of-delta entry is started
}
n := bin.CalcVarInt64Length(deltaOfDelta)
n += 2
s8q := s.buf.GetByte(s.pos - 1)
if s8q == 8 {
n -= 1
} else {
n -= 2
}
return n
}
func (s *ReverseTimeDeltaOfDeltaCompressor) Append(unixtime uint32) {
if s.pos == 0 {
s.lastUnixtime = unixtime
s.buf.PutUint32(lastUnixtimeIdx, unixtime)
s.pos += 4
return
}
if s.baseDelta == 0 {
s.baseDelta = unixtime - s.lastUnixtime
s.lastDeltaOfDelta = 0
s.lastUnixtime = unixtime
s.buf.PutUint32(lastUnixtimeIdx, unixtime)
n := s.buf.PutVarUint64(s.pos, uint64(s.baseDelta))
s.pos += n
s.encodeNewDeltaOfDelta(0, 0, 1)
return
}
deltaOfDelta := int64(unixtime-s.lastUnixtime) - int64(s.baseDelta)
s.lastUnixtime = unixtime
s.buf.PutUint32(lastUnixtimeIdx, unixtime)
if deltaOfDelta == s.lastDeltaOfDelta {
if s.length == 0 {
s.length = 2
s.shiftOnePosToRight()
s.buf.SetByte(s.numIdx-1, 0)
s8q := s.buf.GetByte(s.pos - 1)
s.buf.SetFlag(s.pos-2, 1<<(8-s8q))
} else {
s.length++
if s.length < 130 {
s.buf.SetByte(s.numIdx-1, byte(s.length-2))
} else if s.length == 130 {
s.shiftOnePosToRight()
s.encode2bLength()
} else if s.length < 32769 {
s.encode2bLength()
} else {
s.appendNewDeltaOfDelta(deltaOfDelta)
}
}
} else {
s.appendNewDeltaOfDelta(deltaOfDelta)
}
}
func (s *ReverseTimeDeltaOfDeltaCompressor) appendNewDeltaOfDelta(deltaOfDelta int64) {
s.length = 0
s8 := s.buf.GetByte(s.pos - 2)
s8q := s.buf.GetByte(s.pos - 1)
if s8q == 8 {
s.pos -= 1
s8 = 0
s8q = 1
} else {
s.pos -= 2
s8q++
}
s.encodeNewDeltaOfDelta(deltaOfDelta, s8, s8q)
}
func (s *ReverseTimeDeltaOfDeltaCompressor) encodeNewDeltaOfDelta(deltaOfDelta int64, s8 byte, s8q byte) {
s.lastDeltaOfDelta = deltaOfDelta
s.numIdx = s.pos
n := s.buf.ReversePutVarInt64(s.pos, deltaOfDelta)
s.pos += n
s.buf.SetByte(s.pos, s8)
s.pos++
s.buf.SetByte(s.pos, s8q)
s.pos++
}
func (s *ReverseTimeDeltaOfDeltaCompressor) shiftOnePosToRight() {
s.buf.ShiftOnePosToRight(s.numIdx, s.pos)
s.pos++
s.numIdx++
}
func (s *ReverseTimeDeltaOfDeltaCompressor) encode2bLength() {
num := s.length - 2
s.buf.SetByte(s.numIdx-1, byte(num&127)|128)
s.buf.SetByte(s.numIdx-2, byte(num>>7))
}
func (s *ReverseTimeDeltaOfDeltaCompressor) DeleteLast() {
var (
s8q = s.buf.GetByte(s.pos - 1)
s8 = s.buf.GetByte(s.pos - 2)
flag byte = 1 << uint(8-s8q)
)
if s.length > 0 {
if s.length == 2 {
s.length = 0
s.buf.UnsetFlag(s.pos-2, flag)
s.buf.ShiftOnePosToLeft(s.numIdx, s.pos)
s.numIdx--
s.pos--
} else if s.length < 130 {
s.length--
s.buf.SetByte(s.numIdx-1, byte(s.length)-2)
} else if s.length == 130 {
s.length--
s.buf.ShiftOnePosToLeft(s.numIdx, s.pos)
s.numIdx--
s.pos--
s.buf.SetByte(s.numIdx-1, byte(s.length)-2)
} else {
s.length--
s.encode2bLength()
}
} else {
if s8q > 1 {
s8q--
flag = 1 << uint(8-s8q)
s.pos = s.numIdx + 2
s.buf.SetByte(s.pos-2, s8)
s.buf.SetByte(s.pos-1, s8q)
} else {
s.pos = s.numIdx + 1
s.buf.SetByte(s.pos-1, 8)
s8 = s.buf.GetByte(s.pos - 2)
flag = 1
}
var (
pos = s.pos - 3
n int
err error
)
s.lastDeltaOfDelta, n, err = s.buf.ReverseGetVarInt64(pos)
if err != nil {
panic(err)
}
s.numIdx = pos - n
if (s8 & flag) == flag {
s.length, _ = s.buf.DecodeRunLength(s.numIdx - 1)
}
}
delta := int64(s.baseDelta) + s.lastDeltaOfDelta
s.lastUnixtime = uint32(int64(s.lastUnixtime) - delta)
s.buf.PutUint32(lastUnixtimeIdx, s.lastUnixtime)
}
type ReverseTimeDeltaOfDeltaDecompressor struct {
step byte
buf *conbuf.ContinuousBuffer
pos int
bound int
lastUnixtime uint32
baseDelta uint32
lastDeltaOfDelta int64
length uint16
idxOf8 uint
s8 byte
}
func NewReverseTimeDeltaOfDeltaDecompressor(buf *conbuf.ContinuousBuffer, size int) *ReverseTimeDeltaOfDeltaDecompressor {
return &ReverseTimeDeltaOfDeltaDecompressor{
buf: buf,
pos: size,
}
}
func (s *ReverseTimeDeltaOfDeltaDecompressor) NextValue() (value uint32, done bool) {
if s.step == 0 {
if s.pos == 0 {
return 0, true
}
s.lastUnixtime = s.buf.GetUint32(lastUnixtimeIdx)
s.step = 1
return s.lastUnixtime, false
}
if s.step == 1 {
if s.pos == baseDeltaIdx {
return 0, true
}
u64, n, err := s.buf.GetVarUint64(baseDeltaIdx)
if err != nil {
panic("EOF")
}
s.bound = baseDeltaIdx + n
s.baseDelta = uint32(u64)
s.pos--
s.idxOf8 = uint(8 - s.buf.GetByte(s.pos))
s.pos--
s.s8 = s.buf.GetByte(s.pos)
s.pos--
s.readVar()
if s.length > 0 {
s.length--
}
s.step = 2
return s.lastUnixtime, false
}
if s.length > 0 {
s.length--
delta := int64(s.baseDelta) + s.lastDeltaOfDelta
s.lastUnixtime = uint32(int64(s.lastUnixtime) - delta)
return s.lastUnixtime, false
}
if s.pos < s.bound {
return 0, true
}
if s.idxOf8 == 0 {
s.s8 = s.buf.GetByte(s.pos)
s.pos--
}
s.readVar()
if s.length > 0 {
s.length--
}
return s.lastUnixtime, false
}
func (s *ReverseTimeDeltaOfDeltaDecompressor) readVar() {
var (
n int
err error
)
s.lastDeltaOfDelta, n, err = s.buf.ReverseGetVarInt64(s.pos)
if err != nil {
panic(err)
}
s.pos -= n
delta := int64(s.baseDelta) + s.lastDeltaOfDelta
s.lastUnixtime = uint32(int64(s.lastUnixtime) - delta)
var flag byte = 1 << s.idxOf8
if (s.s8 & flag) == flag {
s.length, n = s.buf.DecodeRunLength(s.pos)
s.pos -= n
}
if s.idxOf8 == 7 {
s.idxOf8 = 0
} else {
s.idxOf8++
}
}

@ -0,0 +1,755 @@
package client
import (
"fmt"
"net"
"gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/bin"
"gordenko.dev/dima/diploma/bufreader"
"gordenko.dev/dima/diploma/proto"
)
const (
metricKeySize = 4
)
type Error struct {
Code uint16
Message string
}
func (s Error) Error() string {
return fmt.Sprintf("%d: %s", s.Code, s.Message)
}
type Connection struct {
conn net.Conn
src *bufreader.BufferedReader
}
func Connect(address string) (*Connection, error) {
conn, err := net.Dial("tcp", address)
if err != nil {
return nil, err
}
return &Connection{
conn: conn,
src: bufreader.New(conn, 1500),
}, nil
}
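// A minimal usage sketch (assumes a database listening on
// localhost:12345, as in database.ini; error handling elided):
//
//	conn, _ := client.Connect("127.0.0.1:12345")
//	defer conn.Close()
//	_ = conn.AddMetric(client.Metric{
//		MetricID:   1,
//		MetricType: diploma.Instant,
//		FracDigits: 2,
//	})
//	_ = conn.AppendMeasure(client.AppendMeasureReq{
//		MetricID:  1,
//		Timestamp: 1700000000,
//		Value:     42.5,
//	})
//	measures, _ := conn.ListAllInstantMeasures(1)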
func (s *Connection) String() string {
return s.conn.LocalAddr().String()
}
func (s *Connection) Close() {
s.conn.Close()
}
func (s *Connection) mustSuccess(reader *bufreader.BufferedReader) (err error) {
code, err := reader.ReadByte()
if err != nil {
return fmt.Errorf("read response code: %s", err)
}
switch code {
case proto.RespSuccess:
return nil // ok
case proto.RespError:
return s.onError()
default:
return fmt.Errorf("unknown reponse code %d", code)
}
}
type Metric struct {
MetricID uint32
MetricType diploma.MetricType
FracDigits byte
}
func (s *Connection) AddMetric(req Metric) error {
arr := []byte{
proto.TypeAddMetric,
0, 0, 0, 0, // metricID
byte(req.MetricType),
byte(req.FracDigits),
}
bin.PutUint32(arr[1:], req.MetricID)
if _, err := s.conn.Write(arr); err != nil {
return err
}
return s.mustSuccess(s.src)
}
func (s *Connection) GetMetric(metricID uint32) (*Metric, error) {
arr := []byte{
proto.TypeGetMetric,
0, 0, 0, 0,
}
bin.PutUint32(arr[1:], metricID)
if _, err := s.conn.Write(arr); err != nil {
return nil, err
}
code, err := s.src.ReadByte()
if err != nil {
return nil, fmt.Errorf("read response code: %s", err)
}
switch code {
case proto.RespValue:
arr, err := s.src.ReadN(6)
if err != nil {
return nil, fmt.Errorf("read body: %s", err)
}
return &Metric{
MetricID: bin.GetUint32(arr),
MetricType: diploma.MetricType(arr[4]),
FracDigits: arr[5],
}, nil
case proto.RespError:
return nil, s.onMaybeError()
default:
return nil, fmt.Errorf("unknown reponse code %d", code)
}
}
func (s *Connection) DeleteMetric(metricID uint32) error {
arr := []byte{
proto.TypeDeleteMetric,
0, 0, 0, 0, // metricID
}
bin.PutUint32(arr[1:], metricID)
if _, err := s.conn.Write(arr); err != nil {
return err
}
return s.mustSuccess(s.src)
}
type AppendMeasureReq struct {
MetricID uint32
Timestamp uint32
Value float64
}
func (s *Connection) AppendMeasure(req AppendMeasureReq) (err error) {
arr := []byte{
proto.TypeAppendMeasure,
0, 0, 0, 0, // metricID
0, 0, 0, 0, // timestamp
0, 0, 0, 0, 0, 0, 0, 0, // value
}
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint32(arr[5:], req.Timestamp)
bin.PutFloat64(arr[9:], req.Value)
if _, err := s.conn.Write(arr); err != nil {
return err
}
return s.mustSuccess(s.src)
}
type AppendMeasuresReq struct {
MetricID uint32
Measures []Measure
}
type Measure struct {
Timestamp uint32
Value float64
}
func (s *Connection) AppendMeasures(req AppendMeasuresReq) (err error) {
if len(req.Measures) > 65535 {
return fmt.Errorf("wrong measures qty: %d", len(req.Measures))
}
var (
prefixSize = 7
recordSize = 12
arr = make([]byte, prefixSize+len(req.Measures)*recordSize)
)
arr[0] = proto.TypeAppendMeasures
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint16(arr[5:], uint16(len(req.Measures)))
pos := prefixSize
for _, measure := range req.Measures {
bin.PutUint32(arr[pos:], measure.Timestamp)
bin.PutFloat64(arr[pos+4:], measure.Value)
pos += recordSize
}
if _, err := s.conn.Write(arr); err != nil {
return err
}
return s.mustSuccess(s.src)
}
type InstantMeasure struct {
Timestamp uint32
Value float64
}
func (s *Connection) ListAllInstantMeasures(metricID uint32) ([]InstantMeasure, error) {
arr := []byte{
proto.TypeListAllInstantMeasures,
0, 0, 0, 0, // metricID
}
bin.PutUint32(arr[1:], metricID)
if _, err := s.conn.Write(arr); err != nil {
return nil, err
}
var (
result []InstantMeasure
tmp = make([]byte, 12)
)
for {
code, err := s.src.ReadByte()
if err != nil {
return nil, fmt.Errorf("read response code: %s", err)
}
switch code {
case proto.RespPartOfValue:
q, err := bin.ReadUint32(s.src)
if err != nil {
return nil, fmt.Errorf("read records qty: %s", err)
}
for i := range int(q) {
err = bin.ReadNInto(s.src, tmp)
if err != nil {
return nil, fmt.Errorf("read record #%d: %s", i, err)
}
result = append(result, InstantMeasure{
Timestamp: bin.GetUint32(tmp),
Value: bin.GetFloat64(tmp[4:]),
})
}
case proto.RespEndOfValue:
return result, nil
case proto.RespError:
return nil, s.onError()
default:
return nil, fmt.Errorf("unknown reponse code %d", code)
}
}
}
func (s *Connection) ListInstantMeasures(req proto.ListInstantMeasuresReq) ([]InstantMeasure, error) {
arr := []byte{
proto.TypeListInstantMeasures,
0, 0, 0, 0, // metricID
0, 0, 0, 0, // since
0, 0, 0, 0, // until
byte(req.FirstHourOfDay),
}
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint32(arr[5:], req.Since)
bin.PutUint32(arr[9:], req.Until)
if _, err := s.conn.Write(arr); err != nil {
return nil, err
}
var (
result []InstantMeasure
tmp = make([]byte, 12)
)
for {
code, err := s.src.ReadByte()
if err != nil {
return nil, fmt.Errorf("read response code: %s", err)
}
switch code {
case proto.RespPartOfValue:
q, err := bin.ReadUint32(s.src)
if err != nil {
return nil, fmt.Errorf("read records qty: %s", err)
}
for i := range int(q) {
err = bin.ReadNInto(s.src, tmp)
if err != nil {
return nil, fmt.Errorf("read record #%d: %s", i, err)
}
result = append(result, InstantMeasure{
Timestamp: bin.GetUint32(tmp),
Value: bin.GetFloat64(tmp[4:]),
})
}
case proto.RespEndOfValue:
return result, nil
case proto.RespError:
return nil, s.onError()
default:
return nil, fmt.Errorf("unknown reponse code %d", code)
}
}
}
type CumulativeMeasure struct {
Timestamp uint32
Value float64
Total float64
}
func (s *Connection) ListAllCumulativeMeasures(metricID uint32) ([]CumulativeMeasure, error) {
arr := []byte{
proto.TypeListAllCumulativeMeasures,
0, 0, 0, 0, // metricID
}
bin.PutUint32(arr[1:], metricID)
if _, err := s.conn.Write(arr); err != nil {
return nil, err
}
var (
result []CumulativeMeasure
tmp = make([]byte, 20)
)
for {
code, err := s.src.ReadByte()
if err != nil {
return nil, fmt.Errorf("read response code: %s", err)
}
switch code {
case proto.RespPartOfValue:
q, err := bin.ReadUint32(s.src)
if err != nil {
return nil, fmt.Errorf("read records qty: %s", err)
}
for i := range int(q) {
err = bin.ReadNInto(s.src, tmp)
if err != nil {
return nil, fmt.Errorf("read record #%d: %s", i, err)
}
result = append(result, CumulativeMeasure{
Timestamp: bin.GetUint32(tmp),
Value: bin.GetFloat64(tmp[4:]),
Total: bin.GetFloat64(tmp[12:]),
})
}
case proto.RespEndOfValue:
return result, nil
case proto.RespError:
return nil, s.onError()
default:
return nil, fmt.Errorf("unknown reponse code %d", code)
}
}
}
func (s *Connection) ListCumulativeMeasures(req proto.ListCumulativeMeasuresReq) ([]CumulativeMeasure, error) {
arr := []byte{
proto.TypeListCumulativeMeasures,
0, 0, 0, 0, // metricID
0, 0, 0, 0, // since
0, 0, 0, 0, // until
byte(req.FirstHourOfDay),
}
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint32(arr[5:], req.Since)
bin.PutUint32(arr[9:], req.Until)
if _, err := s.conn.Write(arr); err != nil {
return nil, err
}
var (
result []CumulativeMeasure
tmp = make([]byte, 20)
)
for {
code, err := s.src.ReadByte()
if err != nil {
return nil, fmt.Errorf("read response code: %s", err)
}
switch code {
case proto.RespPartOfValue:
q, err := bin.ReadUint32(s.src)
if err != nil {
return nil, fmt.Errorf("read records qty: %s", err)
}
for i := range int(q) {
err = bin.ReadNInto(s.src, tmp)
if err != nil {
return nil, fmt.Errorf("read record #%d: %s", i, err)
}
result = append(result, CumulativeMeasure{
Timestamp: bin.GetUint32(tmp),
Value: bin.GetFloat64(tmp[4:]),
Total: bin.GetFloat64(tmp[12:]),
})
}
case proto.RespEndOfValue:
return result, nil
case proto.RespError:
return nil, s.onError()
default:
return nil, fmt.Errorf("unknown reponse code %d", code)
}
}
}
type InstantPeriod struct {
Period uint32
Since uint32
Until uint32
Min float64
Max float64
Avg float64
}
func (s *Connection) ListInstantPeriods(req proto.ListInstantPeriodsReq) ([]InstantPeriod, error) {
arr := []byte{
proto.TypeListInstantPeriods,
0, 0, 0, 0, // metricID
0, 0, 0, 0, // since
0, 0, 0, 0, // until
byte(req.GroupBy),
req.AggregateFuncs,
byte(req.FirstHourOfDay),
byte(req.LastDayOfMonth),
}
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint32(arr[5:], req.Since)
bin.PutUint32(arr[9:], req.Until)
if _, err := s.conn.Write(arr); err != nil {
return nil, err
}
var q int
if (req.AggregateFuncs & diploma.AggregateMin) == diploma.AggregateMin {
q++
}
if (req.AggregateFuncs & diploma.AggregateMax) == diploma.AggregateMax {
q++
}
if (req.AggregateFuncs & diploma.AggregateAvg) == diploma.AggregateAvg {
q++
}
var (
result []InstantPeriod
// 12 bytes - period, since, until
// q * 8 bytes - min, max, avg
tmp = make([]byte, 12+q*8)
)
for {
code, err := s.src.ReadByte()
if err != nil {
return nil, fmt.Errorf("read response code: %s", err)
}
switch code {
case proto.RespPartOfValue:
q, err := bin.ReadUint32(s.src)
if err != nil {
return nil, fmt.Errorf("read records qty: %s", err)
}
for i := range int(q) {
err = bin.ReadNInto(s.src, tmp)
if err != nil {
return nil, fmt.Errorf("read record #%d: %s", i, err)
}
var (
p = InstantPeriod{
Period: bin.GetUint32(tmp[0:]),
Since: bin.GetUint32(tmp[4:]),
Until: bin.GetUint32(tmp[8:]),
}
// 12 bytes - period, since, until
pos = 12
)
if (req.AggregateFuncs & diploma.AggregateMin) == diploma.AggregateMin {
p.Min = bin.GetFloat64(tmp[pos:])
pos += 8
}
if (req.AggregateFuncs & diploma.AggregateMax) == diploma.AggregateMax {
p.Max = bin.GetFloat64(tmp[pos:])
pos += 8
}
if (req.AggregateFuncs & diploma.AggregateAvg) == diploma.AggregateAvg {
p.Avg = bin.GetFloat64(tmp[pos:])
}
result = append(result, p)
}
case proto.RespEndOfValue:
return result, nil
case proto.RespError:
return nil, s.onError()
default:
return nil, fmt.Errorf("unknown reponse code %d", code)
}
}
}
type CumulativePeriod struct {
Period uint32
Since uint32
Until uint32
EndValue float64
Total float64
}
func (s *Connection) ListCumulativePeriods(req proto.ListCumulativePeriodsReq) ([]CumulativePeriod, error) {
arr := []byte{
proto.TypeListCumulativePeriods,
0, 0, 0, 0, // metricID
0, 0, 0, 0, // since
0, 0, 0, 0, // until
byte(req.GroupBy),
byte(req.FirstHourOfDay),
byte(req.LastDayOfMonth),
}
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint32(arr[5:], req.Since)
bin.PutUint32(arr[9:], req.Until)
if _, err := s.conn.Write(arr); err != nil {
return nil, err
}
var (
result []CumulativePeriod
tmp = make([]byte, 28)
)
for {
code, err := s.src.ReadByte()
if err != nil {
return nil, fmt.Errorf("read response code: %s", err)
}
switch code {
case proto.RespPartOfValue:
q, err := bin.ReadUint32(s.src)
if err != nil {
return nil, fmt.Errorf("read records qty: %s", err)
}
for i := range int(q) {
err = bin.ReadNInto(s.src, tmp)
if err != nil {
return nil, fmt.Errorf("read record #%d: %s", i, err)
}
result = append(result, CumulativePeriod{
Period: bin.GetUint32(tmp[0:]),
Since: bin.GetUint32(tmp[4:]),
Until: bin.GetUint32(tmp[8:]),
EndValue: bin.GetFloat64(tmp[12:]),
Total: bin.GetFloat64(tmp[20:]),
})
}
case proto.RespEndOfValue:
return result, nil
case proto.RespError:
return nil, s.onError()
default:
return nil, fmt.Errorf("unknown reponse code %d", code)
}
}
}
type CurrentValue struct {
MetricID uint32
Timestamp uint32
Value float64
}
func (s *Connection) ListCurrentValues(metricIDs []uint32) ([]CurrentValue, error) {
arr := make([]byte, 3+metricKeySize*len(metricIDs))
arr[0] = proto.TypeListCurrentValues
bin.PutUint16(arr[1:], uint16(len(metricIDs)))
off := 3
for _, metricID := range metricIDs {
bin.PutUint32(arr[off:], metricID)
off += metricKeySize
}
if _, err := s.conn.Write(arr); err != nil {
return nil, err
}
var (
result []CurrentValue
tmp = make([]byte, 16)
)
for {
code, err := s.src.ReadByte()
if err != nil {
return nil, fmt.Errorf("read response code: %s", err)
}
switch code {
case proto.RespPartOfValue:
q, err := bin.ReadUint32(s.src)
if err != nil {
return nil, fmt.Errorf("read records qty: %s", err)
}
for i := range int(q) {
err = bin.ReadNInto(s.src, tmp)
if err != nil {
return nil, fmt.Errorf("read record #%d: %s", i, err)
}
result = append(result, CurrentValue{
MetricID: bin.GetUint32(tmp),
Timestamp: bin.GetUint32(tmp[4:]),
Value: bin.GetFloat64(tmp[8:]),
})
}
case proto.RespEndOfValue:
return result, nil
case proto.RespError:
return nil, s.onError()
default:
return nil, fmt.Errorf("unknown reponse code %d", code)
}
}
}
func (s *Connection) DeleteMeasures(req proto.DeleteMeasuresReq) (err error) {
arr := []byte{
proto.TypeDeleteMeasures,
0, 0, 0, 0, // metricID
0, 0, 0, 0, // since
}
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint32(arr[5:], req.Since)
if _, err := s.conn.Write(arr); err != nil {
return err
}
return s.mustSuccess(s.src)
}
type RangeTotalResp struct {
Since uint32
SinceValue float64
Until uint32
UntilValue float64
}
func (s *Connection) RangeTotal(req proto.RangeTotalReq) (*RangeTotalResp, error) {
arr := []byte{
proto.TypeRangeTotal, // assumed message-type constant; TypeGetMetric (sent here originally) cannot encode this 13-byte request
0, 0, 0, 0, // metricID
0, 0, 0, 0, // since
0, 0, 0, 0, // until
}
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint32(arr[5:], req.Since)
bin.PutUint32(arr[9:], req.Until) // was req.MetricID, an evident copy-paste slip; field name assumed from proto.RangeTotalReq
if _, err := s.conn.Write(arr); err != nil {
return nil, err
}
code, err := s.src.ReadByte()
if err != nil {
return nil, fmt.Errorf("read response code: %s", err)
}
switch code {
case proto.RespValue:
arr, err := s.src.ReadN(24)
if err != nil {
return nil, fmt.Errorf("read body: %s", err)
}
return &RangeTotalResp{
Since: bin.GetUint32(arr),
SinceValue: bin.GetFloat64(arr[4:]),
Until: bin.GetUint32(arr[12:]),
UntilValue: bin.GetFloat64(arr[16:]),
}, nil
case proto.RespError:
return nil, s.onError()
default:
return nil, fmt.Errorf("unknown reponse code %d", code)
}
}
func (s *Connection) onError() error {
errorCode, err := bin.ReadUint16(s.src)
if err != nil {
return fmt.Errorf("read error code: %s", err)
}
return Error{
Code: errorCode,
Message: proto.ErrorCodeToText(errorCode),
}
}
func (s *Connection) onMaybeError() error {
errorCode, err := bin.ReadUint16(s.src)
if err != nil {
return fmt.Errorf("read error code: %s", err)
}
if errorCode == proto.ErrNoMetric {
return nil
}
return Error{
Code: errorCode,
Message: proto.ErrorCodeToText(errorCode),
}
}

@ -0,0 +1,459 @@
package conbuf
import (
"errors"
"fmt"
"io"
"gordenko.dev/dima/diploma/bin"
)
const chunkSize = 512
var (
ErrOutOfRange = errors.New("out of range")
)
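// ContinuousBuffer presents a list of fixed-size (512-byte) chunks as a
// single growable byte array: writes past the current end allocate new
// chunks on demand, so growing never reallocates or copies existing
// data.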
type ContinuousBuffer struct {
chunks [][]byte
}
func NewFromBuffer(buf []byte) *ContinuousBuffer {
var (
chunks [][]byte
copied = 0
)
for copied < len(buf) {
chunk := make([]byte, chunkSize)
end := min(copied+chunkSize, len(buf))
copy(chunk, buf[copied:end])
chunks = append(chunks, chunk)
copied = end
}
return &ContinuousBuffer{
chunks: chunks,
}
}
func New(chunks [][]byte) *ContinuousBuffer {
for i, chunk := range chunks {
if len(chunk) != chunkSize {
panic(fmt.Sprintf("wrong chunk #%d size %d", i, len(chunk)))
}
}
return &ContinuousBuffer{
chunks: chunks,
}
}
func (s *ContinuousBuffer) Chunks() [][]byte {
return s.chunks
}
// [0, pos)
func (s *ContinuousBuffer) GetByte(idx int) byte {
chunkIdx := idx / chunkSize
if chunkIdx >= len(s.chunks) {
panic(ErrOutOfRange)
}
byteIdx := idx % chunkSize
return s.chunks[chunkIdx][byteIdx]
}
func (s *ContinuousBuffer) SetByte(idx int, b byte) {
chunkIdx := idx / chunkSize
if chunkIdx > len(s.chunks) {
panic(ErrOutOfRange)
}
if chunkIdx == len(s.chunks) {
s.chunks = append(s.chunks, make([]byte, chunkSize))
}
byteIdx := idx % chunkSize
s.chunks[chunkIdx][byteIdx] = b
}
func (s *ContinuousBuffer) SetFlag(idx int, flag byte) {
chunkIdx := idx / chunkSize
if chunkIdx > len(s.chunks) {
panic(ErrOutOfRange)
}
if chunkIdx == len(s.chunks) {
s.chunks = append(s.chunks, make([]byte, chunkSize))
}
byteIdx := idx % chunkSize
s.chunks[chunkIdx][byteIdx] |= flag
}
func (s *ContinuousBuffer) UnsetFlag(idx int, flag byte) {
chunkIdx := idx / chunkSize
if chunkIdx > len(s.chunks) {
panic(ErrOutOfRange)
}
if chunkIdx == len(s.chunks) {
s.chunks = append(s.chunks, make([]byte, chunkSize))
}
byteIdx := idx % chunkSize
s.chunks[chunkIdx][byteIdx] &^= flag
}
// [since, until)
func (s *ContinuousBuffer) ShiftOnePosToRight(since int, until int) {
if since < 0 {
panic("since < 0")
}
if since >= until {
panic("since >= until")
}
chunkIdx := until / chunkSize
byteIdx := until % chunkSize
if chunkIdx > len(s.chunks) {
panic(ErrOutOfRange)
}
if chunkIdx == len(s.chunks) {
if byteIdx == 0 {
s.chunks = append(s.chunks, make([]byte, chunkSize))
} else {
panic(ErrOutOfRange)
}
}
var (
qty = until - since
prevChunkIdx int
prevByteIdx int
)
for range qty {
prevChunkIdx = chunkIdx
prevByteIdx = byteIdx - 1
if prevByteIdx < 0 {
prevChunkIdx = chunkIdx - 1
prevByteIdx = chunkSize - 1
}
s.chunks[chunkIdx][byteIdx] = s.chunks[prevChunkIdx][prevByteIdx]
if byteIdx > 0 {
byteIdx--
} else {
chunkIdx--
byteIdx = chunkSize - 1
}
}
}
// [since, until)
func (s *ContinuousBuffer) ShiftOnePosToLeft(since int, until int) {
if since <= 0 {
panic("since <= 0")
}
if since >= until {
panic("since >= until")
}
chunkIdx := since / chunkSize
byteIdx := since % chunkSize
if until > len(s.chunks)*chunkSize {
panic(ErrOutOfRange)
}
var (
qty = until - since
prevChunkIdx int
prevByteIdx int
)
for range qty {
prevChunkIdx = chunkIdx
prevByteIdx = byteIdx - 1
if prevByteIdx < 0 {
prevChunkIdx = chunkIdx - 1
prevByteIdx = chunkSize - 1
}
s.chunks[prevChunkIdx][prevByteIdx] = s.chunks[chunkIdx][byteIdx]
byteIdx++
if byteIdx == chunkSize {
chunkIdx++
byteIdx = 0
}
}
}
func (s *ContinuousBuffer) PutUint32(pos int, num uint32) {
s.CopyTo(pos,
[]byte{
byte(num),
byte(num >> 8),
byte(num >> 16),
byte(num >> 24),
})
}
func (s *ContinuousBuffer) GetUint32(pos int) uint32 {
arr := s.Slice(pos, pos+4)
return uint32(arr[0]) | (uint32(arr[1]) << 8) |
(uint32(arr[2]) << 16) | (uint32(arr[3]) << 24)
}
func (s *ContinuousBuffer) ReversePutVarUint64(pos int, num uint64) int {
var tmp [9]byte
for i := range 8 {
tmp[i] = byte(num & 127)
num >>= 7
if num == 0 {
tmp[i] |= 128
n := i + 1
s.ReverseCopyTo(pos, tmp[:n])
return n
}
}
tmp[8] = byte(num)
n := 9
s.ReverseCopyTo(pos, tmp[:])
return n
}
func (s *ContinuousBuffer) ReverseGetVarUint64(idx int) (num uint64, n int, err error) {
chunkIdx := idx / chunkSize
if chunkIdx >= len(s.chunks) {
panic(ErrOutOfRange)
}
var (
byteIdx = idx % chunkSize
chunk = s.chunks[chunkIdx]
b byte
)
for i := range 8 {
b = chunk[byteIdx]
if b >= 128 {
num |= uint64(b&127) << uint(i*7)
return num, i + 1, nil
}
num |= uint64(b) << uint(i*7)
if byteIdx > 0 {
byteIdx--
} else {
if chunkIdx == 0 {
return 0, 0, io.EOF
} else {
chunkIdx--
chunk = s.chunks[chunkIdx]
byteIdx = chunkSize - 1
}
}
}
return num | uint64(chunk[byteIdx])<<56, 9, nil
}
func (s *ContinuousBuffer) ReversePutVarInt64(pos int, x int64) int {
return s.ReversePutVarUint64(pos, bin.EncodeZigZag(x))
}
func (s *ContinuousBuffer) ReverseGetVarInt64(idx int) (int64, int, error) {
u64, n, err := s.ReverseGetVarUint64(idx)
if err != nil {
return 0, 0, err
}
return bin.DecodeZigZag(u64), n, nil
}
func (s *ContinuousBuffer) PutVarUint64(pos int, num uint64) int {
var tmp [9]byte
for i := range 8 {
tmp[i] = byte(num & 127)
num >>= 7
if num == 0 {
tmp[i] |= 128
n := i + 1
s.CopyTo(pos, tmp[:n])
return n
}
}
tmp[8] = byte(num)
s.CopyTo(pos, tmp[:])
return 9
}
func (s *ContinuousBuffer) GetVarUint64(idx int) (num uint64, n int, err error) {
chunkIdx := idx / chunkSize
if chunkIdx >= len(s.chunks) {
panic(ErrOutOfRange)
}
var (
byteIdx = idx % chunkSize
chunk = s.chunks[chunkIdx]
b byte
)
for i := range 8 {
b = chunk[byteIdx]
if b >= 128 {
num |= uint64(b&127) << uint(i*7)
return num, i + 1, nil
}
num |= uint64(b) << uint(i*7)
byteIdx++
if byteIdx == chunkSize {
chunkIdx++
if chunkIdx == len(s.chunks) {
return 0, 0, io.EOF
}
chunk = s.chunks[chunkIdx]
byteIdx = 0
}
}
return num | uint64(chunk[byteIdx])<<56, 9, nil
}
func (s *ContinuousBuffer) PutVarInt64(idx int, x int64) int {
return s.PutVarUint64(idx, bin.EncodeZigZag(x))
}
func (s *ContinuousBuffer) GetVarInt64(idx int) (int64, int, error) {
u64, n, err := s.GetVarUint64(idx)
if err != nil {
return 0, 0, err
}
return bin.DecodeZigZag(u64), n, nil
}
func (s *ContinuousBuffer) CopyTo(idx int, data []byte) {
chunkIdx := idx / chunkSize
if chunkIdx > len(s.chunks) {
panic(ErrOutOfRange)
}
if chunkIdx == len(s.chunks) {
s.chunks = append(s.chunks, make([]byte, chunkSize))
}
byteIdx := idx % chunkSize
chunk := s.chunks[chunkIdx]
copied := 0
for _, b := range data {
chunk[byteIdx] = b
copied++
byteIdx++
if byteIdx == chunkSize {
byteIdx = 0
chunkIdx++
if chunkIdx == len(s.chunks) {
if copied == len(data) {
return
}
s.chunks = append(s.chunks, make([]byte, chunkSize))
}
chunk = s.chunks[chunkIdx]
}
}
}
func (s *ContinuousBuffer) ReverseCopyTo(idx int, data []byte) {
chunkIdx := idx / chunkSize
if chunkIdx > len(s.chunks) {
panic(ErrOutOfRange)
}
if chunkIdx == len(s.chunks) {
s.chunks = append(s.chunks, make([]byte, chunkSize))
}
byteIdx := idx % chunkSize
chunk := s.chunks[chunkIdx]
copied := 0
for i := len(data) - 1; i >= 0; i-- {
chunk[byteIdx] = data[i]
copied++
byteIdx++
if byteIdx == chunkSize {
byteIdx = 0
chunkIdx++
if chunkIdx == len(s.chunks) {
if copied == len(data) {
return
}
s.chunks = append(s.chunks, make([]byte, chunkSize))
}
chunk = s.chunks[chunkIdx]
}
}
}
// [since, until)
func (s *ContinuousBuffer) Slice(since int, until int) []byte {
if since >= until {
return nil
}
size := len(s.chunks) * chunkSize
// until may equal the buffer size: only bytes below until are read.
if until > size {
panic(ErrOutOfRange)
}
data := make([]byte, until-since)
chunkIdx := since / chunkSize
byteIdx := since % chunkSize
chunk := s.chunks[chunkIdx]
for i := range data {
data[i] = chunk[byteIdx]
byteIdx++
if byteIdx == chunkSize && i+1 < len(data) {
byteIdx = 0
chunkIdx++
chunk = s.chunks[chunkIdx]
}
}
return data
}
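// DecodeRunLength reads a run length stored for backward traversal at pos:
// one byte if below 128, otherwise two bytes (low 7 bits plus a second byte
// shifted left by 7), then adds back the encoder's offset of 2.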
func (s *ContinuousBuffer) DecodeRunLength(pos int) (length uint16, n int) {
b1 := s.GetByte(pos)
pos--
if b1 < 128 {
length = uint16(b1)
n = 1
} else {
b2 := s.GetByte(pos)
length = uint16(b1&127) | (uint16(b2) << 7)
n = 2
}
length += 2
return
}
func (s *ContinuousBuffer) Copy() *ContinuousBuffer {
var copies [][]byte
for _, chunk := range s.chunks {
buf := make([]byte, chunkSize)
copy(buf, chunk)
copies = append(copies, buf)
}
return New(copies)
}
// CopyChunksToOneBuffer copies the first size bytes of the buffer into dst.
func (s *ContinuousBuffer) CopyChunksToOneBuffer(dst []byte, size int) {
pos := 0
for _, chunk := range s.chunks {
if size >= len(chunk) {
copy(dst[pos:], chunk)
size -= len(chunk)
pos += len(chunk)
} else {
copy(dst[pos:], chunk[:size])
return
}
}
}
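A minimal usage sketch of ContinuousBuffer (assuming, as the database code in this commit does, that conbuf.New(nil) returns an empty buffer that grows on write):
package main
import (
"fmt"
"gordenko.dev/dima/diploma/conbuf"
)
func main() {
buf := conbuf.New(nil)
// Fixed-width little-endian round-trip.
buf.PutUint32(0, 0xDEADBEEF)
fmt.Printf("%#x\n", buf.GetUint32(0)) // 0xdeadbeef
// Forward varint round-trip; n and m are the encoded byte counts.
n := buf.PutVarUint64(4, 300)
v, m, err := buf.GetVarUint64(4)
fmt.Println(v, n == m, err) // 300 true <nil>
}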

@ -0,0 +1,4 @@
tcpPort = 12345
dir = testdir
redoDir = testdir
databaseName = test

File diff suppressed because it is too large

@ -0,0 +1,460 @@
package database
import (
"errors"
"fmt"
"hash/crc32"
"io"
"log"
"net"
"os"
"path/filepath"
"regexp"
"sync"
"time"
"gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/atree"
"gordenko.dev/dima/diploma/atree/redo"
"gordenko.dev/dima/diploma/bin"
"gordenko.dev/dima/diploma/chunkenc"
"gordenko.dev/dima/diploma/conbuf"
"gordenko.dev/dima/diploma/freelist"
"gordenko.dev/dima/diploma/recovery"
"gordenko.dev/dima/diploma/txlog"
)
func JoinSnapshotFileName(dir string, logNumber int) string {
return filepath.Join(dir, fmt.Sprintf("%d.snapshot", logNumber))
}
type metricLockEntry struct {
XLock bool
RLocks int
WaitQueue []any
}
type Database struct {
mutex sync.Mutex
workerSignalCh chan struct{}
workerQueue []any
rLocksToRelease []uint32
metrics map[uint32]*_metric
metricLockEntries map[uint32]*metricLockEntry
dataFreeList *freelist.FreeList
indexFreeList *freelist.FreeList
dir string
databaseName string
redoDir string
txlog *txlog.Writer
atree *atree.Atree
tcpPort int
logfile *os.File
logger *log.Logger
exitCh chan struct{}
waitGroup *sync.WaitGroup
}
type Options struct {
TCPPort int
Dir string
DatabaseName string
RedoDir string
Logfile *os.File
ExitCh chan struct{}
WaitGroup *sync.WaitGroup
}
func New(opt Options) (_ *Database, err error) {
if opt.TCPPort <= 0 {
return nil, errors.New("TCPPort option is required")
}
if opt.Dir == "" {
return nil, errors.New("Dir option is required")
}
if opt.DatabaseName == "" {
return nil, errors.New("DatabaseName option is required")
}
if opt.RedoDir == "" {
return nil, errors.New("RedoDir option is required")
}
if opt.Logfile == nil {
return nil, errors.New("Logfile option is required")
}
if opt.ExitCh == nil {
return nil, errors.New("ExitCh option is required")
}
if opt.WaitGroup == nil {
return nil, errors.New("WaitGroup option is required")
}
s := &Database{
workerSignalCh: make(chan struct{}, 1),
dir: opt.Dir,
databaseName: opt.DatabaseName,
redoDir: opt.RedoDir,
metrics: make(map[uint32]*_metric),
metricLockEntries: make(map[uint32]*metricLockEntry),
dataFreeList: freelist.New(),
indexFreeList: freelist.New(),
tcpPort: opt.TCPPort,
logfile: opt.Logfile,
logger: log.New(opt.Logfile, "", log.LstdFlags),
exitCh: opt.ExitCh,
waitGroup: opt.WaitGroup,
}
return s, nil
}
func (s *Database) ListenAndServe() (err error) {
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", s.tcpPort))
if err != nil {
return fmt.Errorf("net.Listen: %s; port=%d", err, s.tcpPort)
}
s.atree, err = atree.New(atree.Options{
Dir: s.dir,
DatabaseName: s.databaseName,
RedoDir: s.redoDir,
DataFreeList: s.dataFreeList,
IndexFreeList: s.indexFreeList,
})
if err != nil {
return fmt.Errorf("atree.New: %s", err)
}
s.atree.Run()
go s.worker()
s.recovery()
s.logger.Println("database started")
for {
// Listen for an incoming connection.
conn, err := listener.Accept()
if err != nil {
s.logger.Printf("listener.Accept: %s\n", err)
time.Sleep(time.Second)
} else {
go s.handleTCPConn(conn)
}
}
}
func (s *Database) recovery() {
advisor, err := recovery.NewRecoveryAdvisor(recovery.RecoveryAdvisorOptions{
Dir: s.dir,
VerifySnapshot: s.verifySnapshot,
})
if err != nil {
panic(err)
}
recipe, err := advisor.GetRecipe()
if err != nil {
diploma.Abort(diploma.GetRecoveryRecipeFailed, err)
}
var logNumber int
if recipe != nil {
if recipe.Snapshot != "" {
err = s.loadSnapshot(recipe.Snapshot)
if err != nil {
diploma.Abort(diploma.LoadSnapshotFailed, err)
}
}
for _, changesFileName := range recipe.Changes {
err = s.replayChanges(changesFileName)
if err != nil {
diploma.Abort(diploma.ReplayChangesFailed, err)
}
}
logNumber = recipe.LogNumber
}
s.txlog, err = txlog.NewWriter(txlog.WriterOptions{
Dir: s.dir,
LogNumber: logNumber,
AppendToWorkerQueue: s.appendJobToWorkerQueue,
ExitCh: s.exitCh,
WaitGroup: s.waitGroup,
})
if err != nil {
diploma.Abort(diploma.CreateChangesWriterFailed, err)
}
go s.txlog.Run()
fileNames, err := s.searchREDOFiles()
if err != nil {
diploma.Abort(diploma.SearchREDOFilesFailed, err)
}
if len(fileNames) > 0 {
for _, fileName := range fileNames {
err = s.replayREDOFile(fileName)
if err != nil {
diploma.Abort(diploma.ReplayREDOFileFailed, err)
}
}
for _, fileName := range fileNames {
err = os.Remove(fileName)
if err != nil {
diploma.Abort(diploma.RemoveREDOFileFailed, err)
}
}
}
if recipe != nil {
if recipe.CompleteSnapshot {
err = s.dumpSnapshot(logNumber)
if err != nil {
diploma.Abort(diploma.DumpSnapshotFailed, err)
}
}
for _, fileName := range recipe.ToDelete {
err = os.Remove(fileName)
if err != nil {
diploma.Abort(diploma.RemoveRecipeFileFailed, err)
}
}
}
}
func (s *Database) searchREDOFiles() ([]string, error) {
var (
reREDO = regexp.MustCompile(`^a\d+\.redo$`)
fileNames []string
)
entries, err := os.ReadDir(s.redoDir)
if err != nil {
return nil, err
}
for _, entry := range entries {
if entry.Type().IsRegular() {
baseName := entry.Name()
if reREDO.MatchString(baseName) {
fileNames = append(fileNames, filepath.Join(s.redoDir, baseName))
}
}
}
return fileNames, nil
}
func (s *Database) replayREDOFile(fileName string) error {
redoFile, err := redo.ReadREDOFile(redo.ReadREDOFileReq{
FileName: fileName,
DataPageSize: atree.DataPageSize,
IndexPageSize: atree.IndexPageSize,
})
if err != nil {
return fmt.Errorf("can't read REDO file %s: %s", fileName, err)
}
metric, ok := s.metrics[redoFile.MetricID]
if !ok {
return fmt.Errorf("has REDOFile, metric %d not found", redoFile.MetricID)
}
if metric.Until < redoFile.Timestamp {
// Assumed API: ApplyREDO returns a completion channel that is signaled once
// the pages are applied, mirroring the txlog write below.
waitCh := s.atree.ApplyREDO(atree.WriteTask{
DataPage:   redoFile.DataPage,
IndexPages: redoFile.IndexPages,
})
<-waitCh
waitCh = s.txlog.WriteAppendedMeasureWithOverflow(
txlog.AppendedMeasureWithOverflow{
MetricID: redoFile.MetricID,
Timestamp: redoFile.Timestamp,
Value: redoFile.Value,
IsDataPageReused: redoFile.IsDataPageReused,
DataPageNo: redoFile.DataPage.PageNo,
IsRootChanged: redoFile.IsRootChanged,
RootPageNo: redoFile.RootPageNo,
ReusedIndexPages: redoFile.ReusedIndexPages,
},
fileName,
false,
)
<-waitCh
}
return nil
}
func (s *Database) verifySnapshot(fileName string) (_ bool, err error) {
file, err := os.Open(fileName)
if err != nil {
return
}
defer file.Close()
stat, err := file.Stat()
if err != nil {
return
}
if stat.Size() <= 4 {
return false, nil
}
var (
payloadSize = stat.Size() - 4
hash = crc32.NewIEEE()
)
_, err = io.CopyN(hash, file, payloadSize)
if err != nil {
return
}
calculatedCRC := hash.Sum32()
storedCRC, err := bin.ReadUint32(file)
if err != nil {
return
}
if storedCRC != calculatedCRC {
return false, fmt.Errorf("strored CRC %d not equal calculated CRC %d",
storedCRC, calculatedCRC)
}
return true, nil
}
func (s *Database) replayChanges(fileName string) error {
walReader, err := txlog.NewReader(txlog.ReaderOptions{
FileName: fileName,
BufferSize: 1024 * 1024,
})
if err != nil {
return err
}
for {
lsn, records, done, err := walReader.ReadPacket()
if err != nil {
return err
}
_ = lsn
if done {
return nil
}
for _, record := range records {
if err = s.replayChangesRecord(record); err != nil {
return err
}
}
}
}
func (s *Database) replayChangesRecord(untyped any) error {
switch rec := untyped.(type) {
case txlog.AddedMetric:
var (
values diploma.ValueCompressor
timestampsBuf = conbuf.New(nil)
valuesBuf = conbuf.New(nil)
)
if rec.MetricType == diploma.Cumulative {
values = chunkenc.NewReverseCumulativeDeltaCompressor(
valuesBuf, 0, byte(rec.FracDigits))
} else {
values = chunkenc.NewReverseInstantDeltaCompressor(
valuesBuf, 0, byte(rec.FracDigits))
}
s.metrics[rec.MetricID] = &_metric{
MetricType: rec.MetricType,
FracDigits: byte(rec.FracDigits),
TimestampsBuf: timestampsBuf,
ValuesBuf: valuesBuf,
Timestamps: chunkenc.NewReverseTimeDeltaOfDeltaCompressor(timestampsBuf, 0),
Values: values,
}
case txlog.DeletedMetric:
delete(s.metrics, rec.MetricID)
if len(rec.FreeDataPages) > 0 {
s.dataFreeList.AddPages(rec.FreeDataPages)
}
if len(rec.FreeIndexPages) > 0 {
s.indexFreeList.AddPages(rec.FreeIndexPages)
}
case txlog.AppendedMeasure:
metric, ok := s.metrics[rec.MetricID]
if ok {
metric.Timestamps.Append(rec.Timestamp)
metric.Values.Append(rec.Value)
if metric.Since == 0 {
metric.Since = rec.Timestamp
metric.SinceValue = rec.Value
}
metric.Until = rec.Timestamp
metric.UntilValue = rec.Value
}
case txlog.AppendedMeasures:
metric, ok := s.metrics[rec.MetricID]
if ok {
for _, measure := range rec.Measures {
metric.Timestamps.Append(measure.Timestamp)
metric.Values.Append(measure.Value)
if metric.Since == 0 {
metric.Since = measure.Timestamp
metric.SinceValue = measure.Value
}
metric.Until = measure.Timestamp
metric.UntilValue = measure.Value
}
}
case txlog.AppendedMeasureWithOverflow:
metric, ok := s.metrics[rec.MetricID]
if ok {
metric.ReinitBy(rec.Timestamp, rec.Value)
if rec.IsRootChanged {
metric.RootPageNo = rec.RootPageNo
}
metric.LastPageNo = rec.DataPageNo
// delete free pages
if rec.IsDataPageReused {
s.dataFreeList.DeletePages([]uint32{
rec.DataPageNo,
})
}
if len(rec.ReusedIndexPages) > 0 {
s.indexFreeList.DeletePages(rec.ReusedIndexPages)
}
}
case txlog.DeletedMeasures:
metric, ok := s.metrics[rec.MetricID]
if ok {
metric.DeleteMeasures()
if len(rec.FreeDataPages) > 0 {
s.dataFreeList.AddPages(rec.FreeDataPages)
}
if len(rec.FreeIndexPages) > 0 {
s.indexFreeList.AddPages(rec.FreeIndexPages)
}
}
default:
diploma.Abort(diploma.UnknownTxLogRecordTypeBug,
fmt.Errorf("bug: unknown record type %T in TransactionLog", rec))
}
return nil
}

@ -0,0 +1,50 @@
package database
import (
"errors"
"io/fs"
"os"
"time"
)
func isFileExist(fileName string) (bool, error) {
_, err := os.Stat(fileName)
if errors.Is(err, fs.ErrNotExist) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}
func (s *Database) appendJobToWorkerQueue(job any) {
s.mutex.Lock()
s.workerQueue = append(s.workerQueue, job)
s.mutex.Unlock()
select {
case s.workerSignalCh <- struct{}{}:
default:
}
}
func (s *Database) metricRUnlock(metricID uint32) {
s.mutex.Lock()
s.rLocksToRelease = append(s.rLocksToRelease, metricID)
s.mutex.Unlock()
select {
case s.workerSignalCh <- struct{}{}:
default:
}
}
func correctToFHD(since, until uint32, firstHourOfDay int) (uint32, uint32) {
duration := time.Duration(firstHourOfDay) * time.Hour
since = uint32(time.Unix(int64(since), 0).Add(duration).Unix())
until = uint32(time.Unix(int64(until), 0).Add(duration).Unix())
return since, until
}

@ -0,0 +1,71 @@
package database
import (
octopus "gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/chunkenc"
"gordenko.dev/dima/diploma/conbuf"
)
// METRIC
type _metric struct {
MetricType octopus.MetricType
FracDigits byte
RootPageNo uint32
LastPageNo uint32
SinceValue float64
Since uint32
UntilValue float64
Until uint32
TimestampsBuf *conbuf.ContinuousBuffer
ValuesBuf *conbuf.ContinuousBuffer
Timestamps octopus.TimestampCompressor
Values octopus.ValueCompressor
}
func (s *_metric) ReinitBy(timestamp uint32, value float64) {
s.TimestampsBuf = conbuf.New(nil)
s.ValuesBuf = conbuf.New(nil)
//
s.Timestamps = chunkenc.NewReverseTimeDeltaOfDeltaCompressor(
s.TimestampsBuf, 0)
if s.MetricType == octopus.Cumulative {
s.Values = chunkenc.NewReverseCumulativeDeltaCompressor(
s.ValuesBuf, 0, s.FracDigits)
} else {
s.Values = chunkenc.NewReverseInstantDeltaCompressor(
s.ValuesBuf, 0, s.FracDigits)
}
s.Timestamps.Append(timestamp)
s.Values.Append(value)
s.Since = timestamp
s.SinceValue = value
s.Until = timestamp
s.UntilValue = value
}
func (s *_metric) DeleteMeasures() {
s.TimestampsBuf = conbuf.New(nil)
s.ValuesBuf = conbuf.New(nil)
//
s.Timestamps = chunkenc.NewReverseTimeDeltaOfDeltaCompressor(
s.TimestampsBuf, 0)
if s.MetricType == octopus.Cumulative {
s.Values = chunkenc.NewReverseCumulativeDeltaCompressor(
s.ValuesBuf, 0, s.FracDigits)
} else {
s.Values = chunkenc.NewReverseInstantDeltaCompressor(
s.ValuesBuf, 0, s.FracDigits)
}
s.RootPageNo = 0
s.LastPageNo = 0
s.Since = 0
s.SinceValue = 0
s.Until = 0
s.UntilValue = 0
}

File diff suppressed because it is too large

@ -0,0 +1,287 @@
package database
import (
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
octopus "gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/atree"
"gordenko.dev/dima/diploma/bin"
"gordenko.dev/dima/diploma/chunkenc"
"gordenko.dev/dima/diploma/conbuf"
"gordenko.dev/dima/diploma/freelist"
)
/*
Snapshot file format:
//lsn - varuint (the last LSN that modified data in RAM)
metricsQty - varuint
[metric]*
where each metric is:
metricID - 4b
metricType - 1b
fracDigits - 1b
rootPageNo - 4b
lastPageNo - 4b
since - 4b
sinceValue - 8b
until - 4b
untilValue - 8b
timestamps size - 2b
values size - 2b
timestamps payload - Nb
values payload - Nb
dataFreeList size - varuint
dataFreeList - Nb
indexFreeList size - varuint
indexFreeList - Nb
CRC32 - 4b
*/
// Fixed-width metric fields: 4+1+1+4+4+4+8+4+8+2+2 = 42 bytes.
const metricHeaderSize = 42
func (s *Database) dumpSnapshot(logNumber int) (err error) {
var (
fileName = filepath.Join(s.dir, fmt.Sprintf("%d.snapshot", logNumber))
hasher = crc32.NewIEEE()
prefix = make([]byte, metricHeaderSize)
)
file, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY, 0770)
if err != nil {
return
}
dst := io.MultiWriter(file, hasher)
_, err = bin.WriteVarUint64(dst, uint64(len(s.metrics)))
if err != nil {
return
}
for metricID, metric := range s.metrics {
tSize := metric.Timestamps.Size()
vSize := metric.Values.Size()
bin.PutUint32(prefix[0:], metricID)
prefix[4] = byte(metric.MetricType)
prefix[5] = metric.FracDigits
bin.PutUint32(prefix[6:], metric.RootPageNo)
bin.PutUint32(prefix[10:], metric.LastPageNo)
bin.PutUint32(prefix[14:], metric.Since)
bin.PutFloat64(prefix[18:], metric.SinceValue)
bin.PutUint32(prefix[26:], metric.Until)
bin.PutFloat64(prefix[30:], metric.UntilValue)
bin.PutUint16(prefix[38:], uint16(tSize))
bin.PutUint16(prefix[40:], uint16(vSize))
_, err = dst.Write(prefix)
if err != nil {
return
}
// copy timestamps
remaining := tSize
for _, buf := range metric.TimestampsBuf.Chunks() {
if remaining < len(buf) {
buf = buf[:remaining]
}
_, err = dst.Write(buf)
if err != nil {
return
}
remaining -= len(buf)
if remaining == 0 {
break
}
}
// copy values
remaining = vSize
for _, buf := range metric.ValuesBuf.Chunks() {
if remaining < len(buf) {
buf = buf[:remaining]
}
_, err = dst.Write(buf)
if err != nil {
return
}
remaining -= len(buf)
if remaining == 0 {
break
}
}
}
// free data pages
err = freeListWriteTo(s.dataFreeList, dst)
if err != nil {
return
}
// free index pages
err = freeListWriteTo(s.indexFreeList, dst)
if err != nil {
return
}
bin.WriteUint32(file, hasher.Sum32())
err = file.Sync()
if err != nil {
return
}
err = file.Close()
if err != nil {
return
}
prevLogNumber := logNumber - 1
prevChanges := filepath.Join(s.dir, fmt.Sprintf("%d.changes", prevLogNumber))
prevSnapshot := filepath.Join(s.dir, fmt.Sprintf("%d.snapshot", prevLogNumber))
isExist, err := isFileExist(prevChanges)
if err != nil {
return
}
if isExist {
err = os.Remove(prevChanges)
if err != nil {
octopus.Abort(octopus.DeletePrevChangesFileFailed, err)
}
}
isExist, err = isFileExist(prevSnapshot)
if err != nil {
return
}
if isExist {
err = os.Remove(prevSnapshot)
if err != nil {
octopus.Abort(octopus.DeletePrevSnapshotFileFailed, err)
}
}
return
}
func (s *Database) loadSnapshot(fileName string) (err error) {
var (
hasher = crc32.NewIEEE()
metricsQty int
header = make([]byte, metricHeaderSize)
body = make([]byte, atree.DataPageSize)
)
file, err := os.Open(fileName)
if err != nil {
return
}
src := io.TeeReader(file, hasher)
u64, _, err := bin.ReadVarUint64(src)
if err != nil {
return
}
metricsQty = int(u64)
for range metricsQty {
var metric _metric
err = bin.ReadNInto(src, header)
if err != nil {
return
}
metricID := bin.GetUint32(header[0:])
metric.MetricType = octopus.MetricType(header[4])
metric.FracDigits = header[5]
metric.RootPageNo = bin.GetUint32(header[6:])
metric.LastPageNo = bin.GetUint32(header[10:])
metric.Since = bin.GetUint32(header[14:])
metric.SinceValue = bin.GetFloat64(header[18:])
metric.Until = bin.GetUint32(header[26:])
metric.UntilValue = bin.GetFloat64(header[30:])
tSize := bin.GetUint16(header[38:])
vSize := bin.GetUint16(header[40:])
buf := body[:tSize]
err = bin.ReadNInto(src, buf)
if err != nil {
return
}
metric.TimestampsBuf = conbuf.NewFromBuffer(buf)
buf = body[:vSize]
err = bin.ReadNInto(src, buf)
if err != nil {
return
}
metric.ValuesBuf = conbuf.NewFromBuffer(buf)
metric.Timestamps = chunkenc.NewReverseTimeDeltaOfDeltaCompressor(
metric.TimestampsBuf, int(tSize))
if metric.MetricType == octopus.Cumulative {
metric.Values = chunkenc.NewReverseCumulativeDeltaCompressor(
metric.ValuesBuf, int(vSize), metric.FracDigits)
} else {
metric.Values = chunkenc.NewReverseInstantDeltaCompressor(
metric.ValuesBuf, int(vSize), metric.FracDigits)
}
s.metrics[metricID] = &metric
}
err = restoreFreeList(s.dataFreeList, src)
if err != nil {
return fmt.Errorf("restore dataFreeList: %s", err)
}
err = restoreFreeList(s.indexFreeList, src)
if err != nil {
return fmt.Errorf("restore indexFreeList: %s", err)
}
calculatedChecksum := hasher.Sum32()
writtenChecksum, err := bin.ReadUint32(file)
if err != nil {
return
}
if calculatedChecksum != writtenChecksum {
return fmt.Errorf("calculated checksum %d not equal written checksum %d", calculatedChecksum, writtenChecksum)
}
return
}
// HELPERS
func freeListWriteTo(freeList *freelist.FreeList, dst io.Writer) error {
serialized, err := freeList.Serialize()
if err != nil {
octopus.Abort(octopus.FailedFreeListSerialize, err)
}
_, err = bin.WriteVarUint64(dst, uint64(len(serialized)))
if err != nil {
return err
}
_, err = dst.Write(serialized)
if err != nil {
return err
}
return nil
}
func restoreFreeList(freeList *freelist.FreeList, src io.Reader) error {
size, _, err := bin.ReadVarUint64(src)
if err != nil {
return err
}
serialized, err := bin.ReadN(src, int(size))
if err != nil {
return err
}
return freeList.Restore(serialized)
}

Binary file not shown.

Binary file not shown.

@ -0,0 +1,88 @@
package diploma
import (
"fmt"
"os"
)
type MetricType byte
type GroupBy byte
const (
Cumulative MetricType = 1
Instant MetricType = 2
MaxFracDigits byte = 7
GroupByHour GroupBy = 1
GroupByDay GroupBy = 2
GroupByMonth GroupBy = 3
AggregateMin byte = 1
AggregateMax byte = 2
AggregateAvg byte = 4
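// The Aggregate* constants are bit flags and are combined with OR
// (e.g. AggregateMin|AggregateMax|AggregateAvg) in AggregateFuncs fields.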
)
type TimestampCompressor interface {
CalcRequiredSpace(uint32) int
Append(uint32)
Size() int
DeleteLast()
//LastTimestamp() uint32
}
type ValueCompressor interface {
CalcRequiredSpace(float64) int
Append(float64)
Size() int
DeleteLast()
//LastValue() float64
}
type TimestampDecompressor interface {
NextValue() (uint32, bool)
}
type ValueDecompressor interface {
NextValue() (float64, bool)
}
type AbortCode int
const (
// Fatal errors
WrongPrevPageNo AbortCode = 1
WriteToAtreeFailed AbortCode = 2
MaxAtreeSizeExceeded AbortCode = 3
FailedWriteToTxLog AbortCode = 4
ReferenceCountBug AbortCode = 5
WrongResultCodeBug AbortCode = 6
RemoveREDOFileFailed AbortCode = 7
FailedAtreeRequest AbortCode = 8
UnknownTxLogRecordTypeBug AbortCode = 11
HasTimestampNoValueBug AbortCode = 12
NoMetricBug AbortCode = 13
NoLockEntryBug AbortCode = 14
NoXLockBug AbortCode = 15
MetricAddedBug AbortCode = 16
NoRLockBug AbortCode = 17
XLockBug AbortCode = 18
FailedFreeListSerialize AbortCode = 19
UnknownWorkerQueueItemBug AbortCode = 20
UnknownMetricWaitQueueItemBug AbortCode = 21
//
GetRecoveryRecipeFailed AbortCode = 26
LoadSnapshotFailed AbortCode = 27
ReplayChangesFailed AbortCode = 28
CreateChangesWriterFailed AbortCode = 29
RemoveRecipeFileFailed AbortCode = 30
DumpSnapshotFailed AbortCode = 31
SearchREDOFilesFailed AbortCode = 32
ReplayREDOFileFailed AbortCode = 33
DeletePrevChangesFileFailed AbortCode = 34
DeletePrevSnapshotFileFailed AbortCode = 35
)
func Abort(code AbortCode, err error) {
fmt.Println(err)
os.Exit(int(code))
}

@ -0,0 +1,102 @@
package enc
import (
"math"
"gordenko.dev/dima/diploma/bin"
)
type ReverseCumulativeDeltaDecompressor struct {
buf []byte
pos int
bound int
firstValue float64
lastValue float64
length uint16
coef float64
idxOf8 uint
s8 byte
}
func NewReverseCumulativeDeltaDecompressor(buf []byte, fracDigits byte) *ReverseCumulativeDeltaDecompressor {
var coef float64 = 1
if fracDigits > 0 {
coef = math.Pow(10, float64(fracDigits))
}
return &ReverseCumulativeDeltaDecompressor{
buf: buf,
coef: coef,
pos: len(buf),
}
}
func (s *ReverseCumulativeDeltaDecompressor) NextValue() (value float64, done bool) {
if s.length > 0 {
s.length--
return s.lastValue, false
}
if s.pos < s.bound {
return 0, true
}
if s.pos == len(s.buf) {
u64, n, err := bin.GetVarUint64(s.buf)
if err != nil {
panic(err)
}
s.firstValue = float64(u64) / s.coef
s.bound = n
s.pos--
s.idxOf8 = uint(8 - s.buf[s.pos])
s.pos--
s.s8 = s.buf[s.pos]
s.pos--
s.readVar()
if s.length > 0 {
s.length--
}
return s.lastValue, false
}
if s.idxOf8 == 0 {
s.s8 = s.buf[s.pos]
s.pos--
}
s.readVar()
if s.length > 0 {
s.length--
}
return s.lastValue, false
}
func (s *ReverseCumulativeDeltaDecompressor) readVar() {
u64, n, err := bin.ReverseGetVarUint64(s.buf[:s.pos+1])
if err != nil {
panic(err)
}
s.pos -= n
s.lastValue = s.firstValue + float64(u64)/s.coef
var flag byte = 1 << s.idxOf8
if (s.s8 & flag) == flag {
s.decodeLength()
}
if s.idxOf8 == 7 {
s.idxOf8 = 0
} else {
s.idxOf8++
}
}
func (s *ReverseCumulativeDeltaDecompressor) decodeLength() {
b1 := s.buf[s.pos]
s.pos--
if b1 < 128 {
s.length = uint16(b1)
} else {
b2 := s.buf[s.pos]
s.pos--
s.length = uint16(b1&127) | (uint16(b2) << 7)
}
s.length += 2
}
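A round-trip sketch for the decompressor above. It assumes, as the snapshot load path in database/snapshot.go suggests, that the chunkenc compressors and the enc decompressors share one encoding; the expected output is an illustration, not a verified result:
package main
import (
"fmt"
"gordenko.dev/dima/diploma/chunkenc"
"gordenko.dev/dima/diploma/conbuf"
"gordenko.dev/dima/diploma/enc"
)
func main() {
valuesBuf := conbuf.New(nil)
comp := chunkenc.NewReverseCumulativeDeltaCompressor(valuesBuf, 0, 2)
for _, v := range []float64{1.00, 1.25, 1.25, 2.50} {
comp.Append(v)
}
raw := make([]byte, comp.Size())
valuesBuf.CopyChunksToOneBuffer(raw, comp.Size())
dec := enc.NewReverseCumulativeDeltaDecompressor(raw, 2)
for {
v, done := dec.NextValue()
if done {
break
}
fmt.Println(v) // expected newest first: 2.5, 1.25, 1.25, 1
}
}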

@ -0,0 +1,3 @@
package enc
const eps = 0.000001

@ -0,0 +1,130 @@
package enc
import (
"fmt"
"math"
octopus "gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/bin"
)
type ReverseInstantDeltaDecompressor struct {
buf []byte
pos int
bound int
firstValue float64
lastValue float64
length uint16
coef float64
idxOf8 uint
s8 byte
}
func NewReverseInstantDeltaDecompressor(buf []byte, fracDigits byte) *ReverseInstantDeltaDecompressor {
var coef float64 = 1
if fracDigits > 0 {
coef = math.Pow(10, float64(fracDigits))
}
return &ReverseInstantDeltaDecompressor{
buf: buf,
coef: coef,
pos: len(buf),
}
}
func (s *ReverseInstantDeltaDecompressor) NextValue() (value float64, done bool) {
if s.length > 0 {
s.length--
return s.lastValue, false
}
if s.pos < s.bound {
return 0, true
}
if s.pos == len(s.buf) {
u64, n, err := bin.GetVarInt64(s.buf)
if err != nil {
panic(err)
}
s.firstValue = float64(u64) / s.coef
s.bound = n
s.pos--
s.idxOf8 = uint(8 - s.buf[s.pos])
s.pos--
s.s8 = s.buf[s.pos]
s.pos--
s.readVar()
if s.length > 0 {
s.length--
}
return s.lastValue, false
}
if s.idxOf8 == 0 {
s.s8 = s.buf[s.pos]
s.pos--
}
s.readVar()
if s.length > 0 {
s.length--
}
return s.lastValue, false
}
func (s *ReverseInstantDeltaDecompressor) readVar() {
i64, n, err := bin.ReverseGetVarInt64(s.buf[:s.pos+1])
if err != nil {
panic(err)
}
s.pos -= n
s.lastValue = s.firstValue + float64(i64)/s.coef
var flag byte = 1 << s.idxOf8
if (s.s8 & flag) == flag {
s.decodeLength()
}
if s.idxOf8 == 7 {
s.idxOf8 = 0
} else {
s.idxOf8++
}
}
func (s *ReverseInstantDeltaDecompressor) decodeLength() {
b1 := s.buf[s.pos]
s.pos--
if b1 < 128 {
s.length = uint16(b1)
} else {
b2 := s.buf[s.pos]
s.pos--
s.length = uint16(b1&127) | (uint16(b2) << 7)
}
s.length += 2
}
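// GetValueBounds drains the reversed stream: the first value produced is the
// most recent (untilValue), the last one is the oldest (sinceValue).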
func GetValueBounds(valuesBuf []byte, metricType octopus.MetricType, fracDigits byte) (sinceValue, untilValue float64) {
var decompressor octopus.ValueDecompressor
switch metricType {
case octopus.Instant:
decompressor = NewReverseInstantDeltaDecompressor(valuesBuf, fracDigits)
case octopus.Cumulative:
decompressor = NewReverseCumulativeDeltaDecompressor(valuesBuf, fracDigits)
default:
panic(fmt.Sprintf("unknown metricType %d", metricType))
}
value, done := decompressor.NextValue()
if done {
return
}
sinceValue = value
untilValue = value
for {
value, done = decompressor.NextValue()
if done {
return
}
sinceValue = value
}
}

@ -0,0 +1,145 @@
package enc
import (
"gordenko.dev/dima/diploma/bin"
)
// REVERSE
const (
lastUnixtimeIdx = 0
baseDeltaIdx = 4
)
type ReverseTimeDeltaOfDeltaDecompressor struct {
step byte
buf []byte
pos int
bound int
lastUnixtime uint32
baseDelta uint32
lastDeltaOfDelta int64
length uint16
idxOf8 uint
s8 byte
}
func NewReverseTimeDeltaOfDeltaDecompressor(buf []byte) *ReverseTimeDeltaOfDeltaDecompressor {
return &ReverseTimeDeltaOfDeltaDecompressor{
buf: buf,
pos: len(buf),
}
}
func (s *ReverseTimeDeltaOfDeltaDecompressor) NextValue() (value uint32, done bool) {
if s.step == 0 {
if s.pos == 0 {
return 0, true
}
s.lastUnixtime = bin.GetUint32(s.buf[lastUnixtimeIdx:])
s.step = 1
return s.lastUnixtime, false
}
if s.step == 1 {
if s.pos == baseDeltaIdx {
return 0, true
}
u64, n, err := bin.GetVarUint64(s.buf[baseDeltaIdx:])
if err != nil {
panic("EOF")
}
s.bound = baseDeltaIdx + n
s.baseDelta = uint32(u64)
s.pos--
s.idxOf8 = uint(8 - s.buf[s.pos])
s.pos--
s.s8 = s.buf[s.pos]
s.pos--
s.readVar()
if s.length > 0 {
s.length--
}
s.step = 2
return s.lastUnixtime, false
}
if s.length > 0 {
s.length--
delta := int64(s.baseDelta) + s.lastDeltaOfDelta
s.lastUnixtime = uint32(int64(s.lastUnixtime) - delta)
return s.lastUnixtime, false
}
if s.pos < s.bound {
return 0, true
}
if s.idxOf8 == 0 {
s.s8 = s.buf[s.pos]
s.pos--
}
s.readVar()
if s.length > 0 {
s.length--
}
return s.lastUnixtime, false
}
func GetTimeRange(timestampsBuf []byte) (since, until uint32) {
decompressor := NewReverseTimeDeltaOfDeltaDecompressor(timestampsBuf)
value, done := decompressor.NextValue()
if done {
return
}
since = value
until = value
for {
value, done = decompressor.NextValue()
if done {
return
}
since = value
}
}
func (s *ReverseTimeDeltaOfDeltaDecompressor) readVar() {
var (
n int
err error
)
s.lastDeltaOfDelta, n, err = bin.ReverseGetVarInt64(s.buf[:s.pos+1])
if err != nil {
panic(err)
}
s.pos -= n
delta := int64(s.baseDelta) + s.lastDeltaOfDelta
s.lastUnixtime = uint32(int64(s.lastUnixtime) - delta)
var flag byte = 1 << s.idxOf8
if (s.s8 & flag) == flag {
s.decodeLength()
}
if s.idxOf8 == 7 {
s.idxOf8 = 0
} else {
s.idxOf8++
}
}
func (s *ReverseTimeDeltaOfDeltaDecompressor) decodeLength() {
b1 := s.buf[s.pos]
s.pos--
if b1 < 128 {
s.length = uint16(b1)
} else {
b2 := s.buf[s.pos]
s.pos--
s.length = uint16(b1&127) | (uint16(b2) << 7)
}
s.length += 2
}
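The same pairing assumption as in the value example applies here: a sketch that compresses a few timestamps with the chunkenc counterpart and recovers the time range:
package main
import (
"fmt"
"gordenko.dev/dima/diploma/chunkenc"
"gordenko.dev/dima/diploma/conbuf"
"gordenko.dev/dima/diploma/enc"
)
func main() {
tsBuf := conbuf.New(nil)
comp := chunkenc.NewReverseTimeDeltaOfDeltaCompressor(tsBuf, 0)
for _, ts := range []uint32{1700000000, 1700000900, 1700001800} {
comp.Append(ts)
}
raw := make([]byte, comp.Size())
tsBuf.CopyChunksToOneBuffer(raw, comp.Size())
since, until := enc.GetTimeRange(raw)
fmt.Println(since, until) // expected: 1700000000 1700001800
}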

@ -0,0 +1,135 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"sync"
"syscall"
"gopkg.in/ini.v1"
"gordenko.dev/dima/diploma/database"
)
func main() {
var (
logfile = os.Stdout
iniFileName string
)
flag.Usage = func() {
fmt.Fprint(flag.CommandLine.Output(), helpMessage)
fmt.Fprint(flag.CommandLine.Output(), configExample)
fmt.Fprintf(flag.CommandLine.Output(), mainUsage, os.Args[0])
flag.PrintDefaults()
}
flag.StringVar(&iniFileName, "c", "database.ini", "path to *.ini config file")
flag.Parse()
config, err := loadConfig(iniFileName)
if err != nil {
log.Fatalln(err)
}
var (
exitCh = make(chan struct{})
wg = new(sync.WaitGroup)
)
db, err := database.New(database.Options{
TCPPort: config.TcpPort,
Dir: config.Dir,
DatabaseName: config.DatabaseName,
RedoDir: config.REDODir,
Logfile: logfile,
ExitCh: exitCh,
WaitGroup: wg,
})
if err != nil {
log.Fatalf("database.New: %s\n", err)
}
go func() {
err = db.ListenAndServe()
if err != nil {
log.Fatalln(err)
}
}()
wg.Add(1)
fmt.Fprintf(logfile, "database %q started on port %d.\n",
config.DatabaseName, config.TcpPort)
fmt.Fprintln(logfile, config)
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
<-sigs
close(exitCh)
wg.Wait()
fmt.Fprintln(logfile, "database stopped.")
}
// config file
const mainUsage = `Usage:
%s -c path/to/config.ini
`
const helpMessage = `Diploma project. Database. Version: 1.0
created by Dmytro Gordenko, 1.e4.kc6@gmail.com
`
const configExample = `
database.ini example:
tcpPort = 12345
dir = ../../datadir
redoDir = ../../datadir
databaseName = test
`
type Config struct {
TcpPort int
Dir string
REDODir string
DatabaseName string
}
func (s Config) String() string {
return fmt.Sprintf(`starting options:
tcpPort = %d
dir = %s
redoDir = %s
databaseName = %s
`,
s.TcpPort, s.Dir, s.REDODir, s.DatabaseName)
}
func loadConfig(iniFileName string) (_ Config, err error) {
file, err := ini.Load(iniFileName)
if err != nil {
return
}
conf := Config{}
top := file.Section("")
conf.TcpPort, err = top.Key("tcpPort").Int()
if err != nil {
err = fmt.Errorf("'tcpPort' option is required in config file")
return
}
conf.Dir = top.Key("dir").String()
conf.REDODir = top.Key("redoDir").String()
conf.DatabaseName = top.Key("databaseName").String()
return conf, nil
}

@ -0,0 +1,377 @@
package main
import (
"encoding/json"
"fmt"
"math/rand"
"os"
"time"
"gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/client"
"gordenko.dev/dima/diploma/proto"
)
// METRICS INFO
type MetricInfo struct {
MetricID uint32 `json:"metricID"`
MetricType diploma.MetricType `json:"metricType"`
FracDigits int `json:"fracDigits"`
Since int64 `json:"since"`
Until int64 `json:"until"`
Qty int `json:"qty"`
}
func readMetricInfo(fileName string) (list []MetricInfo, err error) {
buf, err := os.ReadFile(fileName)
if err != nil {
return
}
err = json.Unmarshal(buf, &list)
return
}
// RANDOM QUERY GENERATOR
type QueryRecipe struct {
MetricID uint32
MetricIDs []uint32
Method int
RangeCode int
Since uint32
Until uint32
GroupBy diploma.GroupBy
}
type RandomQueryGenerator struct {
metrics []MetricInfo
groupByOptions []diploma.GroupBy
listCurrentValuesProbability int
listPeriodsProbability int
timeRangeProbDistribution []int
}
type RandomQueryGeneratorOptions struct {
Metrics []MetricInfo
ListCurrentValuesProbability int
ListPeriodsProbability int
TimeRangeProbabilities []int
}
func NewRandomQueryGenerator(opt RandomQueryGeneratorOptions) *RandomQueryGenerator {
if opt.ListCurrentValuesProbability >= 100 {
panic(fmt.Sprintf("wrong ListCurrentValuesProbability: %d", opt.ListCurrentValuesProbability))
}
if opt.ListPeriodsProbability >= 100 {
panic(fmt.Sprintf("wrong ListPeriodsProbability: %d", opt.ListPeriodsProbability))
}
// check that the time range probabilities sum to 100
var totalTimeRangeProbability int
for _, p := range opt.TimeRangeProbabilities {
totalTimeRangeProbability += p
}
if totalTimeRangeProbability != 100 {
panic(fmt.Sprintf("total time range probabilities != 100: %d", totalTimeRangeProbability))
}
// create time range probability distribution
timeRangeProbDistribution := make([]int, len(opt.TimeRangeProbabilities))
timeRangeProbDistribution[0] = opt.TimeRangeProbabilities[0]
for i := 1; i < len(opt.TimeRangeProbabilities); i++ {
timeRangeProbDistribution[i] = timeRangeProbDistribution[i-1] + opt.TimeRangeProbabilities[i]
}
return &RandomQueryGenerator{
metrics: opt.Metrics,
groupByOptions: []diploma.GroupBy{
diploma.GroupByHour,
diploma.GroupByDay,
diploma.GroupByMonth,
},
listCurrentValuesProbability: opt.ListCurrentValuesProbability,
listPeriodsProbability: opt.ListPeriodsProbability,
timeRangeProbDistribution: timeRangeProbDistribution,
}
}
func (s *RandomQueryGenerator) GetQueryRecipe() QueryRecipe {
metric := s.getRandomMetric()
num := rand.Intn(100)
if num < s.listCurrentValuesProbability {
qty := 5 + rand.Intn(100) // 5 to 104
return QueryRecipe{
MetricIDs: s.listRandomUniqueMetricIDs(qty),
Method: listCurrentValues,
}
} else {
if metric.MetricType == diploma.Cumulative {
num = rand.Intn(100)
if num < s.listPeriodsProbability {
groupBy := s.groupByOptions[rand.Intn(len(s.groupByOptions))]
var (
minDays = 1
maxDays = 7
)
if groupBy == diploma.GroupByDay {
minDays = 1
maxDays = 30
} else if groupBy == diploma.GroupByMonth {
minDays = 1
maxDays = 30
}
rangeCode, since, until := s.getRandomTimeRange(
metric.Since, metric.Until, minDays, maxDays)
return QueryRecipe{
MetricID: metric.MetricID,
Method: listCumulativePeriods,
RangeCode: rangeCode,
Since: uint32(since),
Until: uint32(until),
GroupBy: groupBy,
}
} else {
var (
minDays = 1
maxDays = 3
)
rangeCode, since, until := s.getRandomTimeRange(
metric.Since, metric.Until, minDays, maxDays)
return QueryRecipe{
MetricID: metric.MetricID,
Method: listCumulativeMeasures,
RangeCode: rangeCode,
Since: uint32(since),
Until: uint32(until),
}
}
} else {
num = rand.Intn(100)
if num < s.listPeriodsProbability {
groupBy := s.groupByOptions[rand.Intn(len(s.groupByOptions))]
var (
minDays = 1
maxDays = 7
)
if groupBy == diploma.GroupByDay {
minDays = 1
maxDays = 30
} else if groupBy == diploma.GroupByMonth {
minDays = 1
maxDays = 30
}
rangeCode, since, until := s.getRandomTimeRange(
metric.Since, metric.Until, minDays, maxDays)
return QueryRecipe{
MetricID: metric.MetricID,
Method: listInstantPeriods,
RangeCode: rangeCode,
Since: uint32(since),
Until: uint32(until),
GroupBy: groupBy,
}
} else {
var (
minDays = 1
maxDays = 3
)
rangeCode, since, until := s.getRandomTimeRange(
metric.Since, metric.Until, minDays, maxDays)
return QueryRecipe{
MetricID: metric.MetricID,
Method: listInstantMeasures,
RangeCode: rangeCode,
Since: uint32(since),
Until: uint32(until),
}
}
}
}
}
// Generates count random unique metric IDs from the loaded metrics.
func (s *RandomQueryGenerator) listRandomUniqueMetricIDs(count int) []uint32 {
// shuffle the metric indexes into a random order
indexes := rand.Perm(len(s.metrics))
// take the metricIDs at the first count shuffled indexes
metricIDs := make([]uint32, count)
for i := range count {
metricIDs[i] = s.metrics[indexes[i]].MetricID
}
return metricIDs
}
const (
secondsPerDay = 86400
dayRange = 0
weekRange = 1
monthRange = 2
randomTimeRange = 3
)
// Returns a random time range.
func (s *RandomQueryGenerator) getRandomTimeRange(start, end int64, minDays, maxDays int) (int, int64, int64) {
var (
since int64
until int64
num = rand.Intn(100)
rangeCode int
threshold int
)
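// timeRangeProbDistribution holds running sums, e.g. {82, 94, 97, 100} for
// probabilities {82, 12, 3, 3}; the first threshold greater than num selects
// the range code.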
for rangeCode, threshold = range s.timeRangeProbDistribution {
if num < threshold {
break
}
}
switch rangeCode {
case dayRange:
since = end - secondsPerDay
until = end
case weekRange:
since = end - 7*secondsPerDay
until = end
case monthRange:
since = end - 30*secondsPerDay
until = end
case randomTimeRange:
if start == end {
return rangeCode, start, end
}
// random point in time for since
since = start + rand.Int63n(end-start)
// random duration in days (clamped below so until does not pass end)
durationInDays := minDays + rand.Intn(maxDays-minDays)
until = since + int64(durationInDays)*secondsPerDay
if until > end {
until = end
}
}
return rangeCode, since, until
}
func (s *RandomQueryGenerator) getRandomMetric() MetricInfo {
return s.metrics[rand.Intn(len(s.metrics))]
}
// EXECUTE QUERY
func execQuery(conn *client.Connection, queryGenerator *RandomQueryGenerator, stat *WorkerStat) (err error) {
recipe := queryGenerator.GetQueryRecipe()
var elapsedTime time.Duration
switch recipe.Method {
case listCurrentValues:
t1 := time.Now()
_, err := conn.ListCurrentValues(recipe.MetricIDs)
elapsedTime = time.Since(t1)
stat.ElapsedTime += elapsedTime
stat.Queries++
stat.ElapsedTimeByMethods[recipe.Method] += elapsedTime
stat.MethodCalls[recipe.Method]++
if err != nil {
return fmt.Errorf("ListCurrentValues: %s", err)
}
case listInstantMeasures:
t1 := time.Now()
_, err := conn.ListInstantMeasures(proto.ListInstantMeasuresReq{
MetricID: recipe.MetricID,
Since: recipe.Since,
Until: recipe.Until,
})
elapsedTime = time.Since(t1)
stat.ElapsedTime += elapsedTime
stat.Queries++
stat.ElapsedTimeByMethods[recipe.Method] += elapsedTime
stat.MethodCalls[recipe.Method]++
stat.ElapsedTimeByTimeRanges[recipe.RangeCode] += elapsedTime
stat.TimeRangeCalls[recipe.RangeCode]++
if err != nil {
return fmt.Errorf("ListInstantMeasures(%d): %s",
recipe.MetricID, err)
}
case listCumulativeMeasures:
t1 := time.Now()
_, err := conn.ListCumulativeMeasures(proto.ListCumulativeMeasuresReq{
MetricID: recipe.MetricID,
Since: recipe.Since,
Until: recipe.Until,
})
elapsedTime = time.Since(t1)
stat.ElapsedTime += elapsedTime
stat.Queries++
stat.ElapsedTimeByMethods[recipe.Method] += elapsedTime
stat.MethodCalls[recipe.Method]++
stat.ElapsedTimeByTimeRanges[recipe.RangeCode] += elapsedTime
stat.TimeRangeCalls[recipe.RangeCode]++
if err != nil {
return fmt.Errorf("ListCumulativeMeasures(%d): %s",
recipe.MetricID, err)
}
case listInstantPeriods:
t1 := time.Now()
_, err := conn.ListInstantPeriods(proto.ListInstantPeriodsReq{
MetricID: recipe.MetricID,
Since: recipe.Since,
Until: recipe.Until,
GroupBy: recipe.GroupBy,
AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
})
elapsedTime = time.Since(t1)
stat.ElapsedTime += elapsedTime
stat.Queries++
stat.ElapsedTimeByMethods[recipe.Method] += elapsedTime
stat.MethodCalls[recipe.Method]++
stat.ElapsedTimeByTimeRanges[recipe.RangeCode] += elapsedTime
stat.TimeRangeCalls[recipe.RangeCode]++
if err != nil {
return fmt.Errorf("ListInstantPeriods(%d): %s",
recipe.MetricID, err)
}
case listCumulativePeriods:
t1 := time.Now()
_, err := conn.ListCumulativePeriods(proto.ListCumulativePeriodsReq{
MetricID: recipe.MetricID,
Since: recipe.Since,
Until: recipe.Until,
GroupBy: recipe.GroupBy,
})
elapsedTime = time.Since(t1)
stat.ElapsedTime += elapsedTime
stat.Queries++
stat.ElapsedTimeByMethods[recipe.Method] += elapsedTime
stat.MethodCalls[recipe.Method]++
stat.ElapsedTimeByTimeRanges[recipe.RangeCode] += elapsedTime
stat.TimeRangeCalls[recipe.RangeCode]++
if err != nil {
return fmt.Errorf("ListCumulativePeriods(%d): %s",
recipe.MetricID, err)
}
}
return
}

@ -0,0 +1,261 @@
package main
import (
"flag"
"fmt"
"log"
"math/rand"
"os"
"sync"
"time"
"gopkg.in/ini.v1"
"gordenko.dev/dima/diploma/client"
)
const (
listCumulativeMeasures = 0
listCumulativePeriods = 1
listInstantMeasures = 2
listInstantPeriods = 3
listCurrentValues = 4
methodsQty = 5
timeRangesQty = 4
)
var (
methodCodeToName = []string{
"listCumulativeMeasures",
"listCumulativePeriods",
"listInstantMeasures",
"listInstantPeriods",
"listCurrentValues",
}
rangeCodeToName = []string{
"last day",
"last week",
"last month",
"random time range",
}
)
type WorkerStat struct {
Queries int
ElapsedTime time.Duration
MethodCalls []int
ElapsedTimeByMethods []time.Duration
TimeRangeCalls []int
ElapsedTimeByTimeRanges []time.Duration
}
func main() {
var (
iniFileName string
)
flag.Usage = func() {
fmt.Fprint(flag.CommandLine.Output(), helpMessage)
fmt.Fprint(flag.CommandLine.Output(), configExample)
fmt.Fprintf(flag.CommandLine.Output(), mainUsage, os.Args[0])
flag.PrintDefaults()
}
flag.StringVar(&iniFileName, "c", "loadtest.ini", "path to *.ini config file")
flag.Parse()
config, err := loadConfig(iniFileName)
if err != nil {
log.Fatalln(err)
}
rand.Seed(time.Now().UnixNano())
metrics, err := readMetricInfo(config.MetricsInfo)
if err != nil {
log.Fatalln(err)
}
var (
wg = new(sync.WaitGroup)
stats = make([]*WorkerStat, config.Connections)
queryGenerator = NewRandomQueryGenerator(RandomQueryGeneratorOptions{
Metrics: metrics,
// method call probabilities
ListCurrentValuesProbability: 50, // current values / others
ListPeriodsProbability: 80, // periods / measures
// time range probabilities
TimeRangeProbabilities: []int{
82, // last day
12, // last week
3, // last month
3, // any range
},
})
)
for i := range stats {
stats[i] = &WorkerStat{
MethodCalls: make([]int, methodsQty),
ElapsedTimeByMethods: make([]time.Duration, methodsQty),
TimeRangeCalls: make([]int, timeRangesQty),
ElapsedTimeByTimeRanges: make([]time.Duration, timeRangesQty),
}
}
t1 := time.Now()
for i := range config.Connections {
wg.Add(1)
go func(stat *WorkerStat) {
defer wg.Done()
conn, err := client.Connect(config.DatabaseAddr)
if err != nil {
log.Fatalln(err)
}
defer conn.Close()
for range config.RequestsPerConn {
err := execQuery(conn, queryGenerator, stat)
if err != nil {
log.Println(err)
}
}
}(stats[i])
}
wg.Wait()
testingTime := time.Since(t1)
var (
methodCalls = make([]int, methodsQty)
elapsedTimeByMethods = make([]time.Duration, methodsQty)
timeRangeCalls = make([]int, timeRangesQty)
elapsedTimeByTimeRanges = make([]time.Duration, timeRangesQty)
totalElapsedTime time.Duration
totalQueries int
avgTimePerQuery time.Duration
rps float64
)
for _, stat := range stats {
totalElapsedTime += stat.ElapsedTime
totalQueries += stat.Queries
for i, elapsedTime := range stat.ElapsedTimeByMethods {
elapsedTimeByMethods[i] += elapsedTime
}
for i, qty := range stat.MethodCalls {
methodCalls[i] += qty
}
for i, elapsedTime := range stat.ElapsedTimeByTimeRanges {
elapsedTimeByTimeRanges[i] += elapsedTime
}
for i, qty := range stat.TimeRangeCalls {
timeRangeCalls[i] += qty
}
}
avgTimePerQuery = totalElapsedTime / time.Duration(totalQueries)
rps = float64(config.Connections*config.RequestsPerConn) / testingTime.Seconds()
fmt.Printf(`TEST RESULTS:
Time: %.0f seconds
Connections: %d
Requests per conn: %d
Total requests: %d
AVG request time: %v
RPS: %d
`,
testingTime.Seconds(), config.Connections, config.RequestsPerConn,
totalQueries, avgTimePerQuery, int(rps))
for i, calls := range methodCalls {
if calls == 0 {
continue // avoid division by zero for unused methods
}
totalElapsedTimeByMethod := elapsedTimeByMethods[i]
methodPercent := float64(calls*100) / float64(totalQueries)
fmt.Printf("%s: %d (%.1f%%), AVG request time: %v\n",
methodCodeToName[i], calls, methodPercent,
totalElapsedTimeByMethod/time.Duration(calls))
}
fmt.Println()
for i, calls := range timeRangeCalls {
if calls == 0 {
continue // avoid division by zero for unused time ranges
}
totalElapsedTimeByTimeRange := elapsedTimeByTimeRanges[i]
timeRangePercent := float64(calls*100) / float64(totalQueries-methodCalls[listCurrentValues])
fmt.Printf("%s: %d (%.1f%%), AVG request time: %v\n",
rangeCodeToName[i], calls, timeRangePercent,
totalElapsedTimeByTimeRange/time.Duration(calls))
}
}
// CONFIG FILE
const mainUsage = `Usage:
%s -c path/to/config.ini
`
const helpMessage = `Diploma project. Load test. Version: 1.0
created by Dmytro Gordenko, 1.e4.kc6@gmail.com
`
const configExample = `
loadtest.ini example:
databaseAddr = :12345
metricsInfo = ../../datadir/metrics.info
connections = 1000
requestsPerConn = 500
`
type Config struct {
DatabaseAddr string
MetricsInfo string
Connections int
RequestsPerConn int
}
func (s Config) String() string {
return fmt.Sprintf(`starting options:
databaseAddr = %s
metricsInfo = %s
connections = %d
requestsPerConn = %d
`,
s.DatabaseAddr, s.MetricsInfo, s.Connections, s.RequestsPerConn)
}
func loadConfig(iniFileName string) (_ Config, err error) {
file, err := ini.Load(iniFileName)
if err != nil {
return
}
conf := Config{}
top := file.Section("")
conf.DatabaseAddr = top.Key("databaseAddr").String()
conf.MetricsInfo = top.Key("metricsInfo").String()
conf.Connections, err = top.Key("connections").Int()
if err != nil {
err = fmt.Errorf("'connections' option is required in config file")
return
}
conf.RequestsPerConn, err = top.Key("requestsPerConn").Int()
if err != nil {
err = fmt.Errorf("'requestsPerConn' option is required in config file")
return
}
return conf, nil
}

@ -0,0 +1,81 @@
package main
import (
"math/rand"
"time"
"gordenko.dev/dima/diploma/client"
)
func GenerateCumulativeMeasures(days int) []client.Measure {
var (
measures []client.Measure
minutes = []int{14, 29, 44, 59}
hoursPerDay = 24
totalHours = days * hoursPerDay
since = time.Now().AddDate(0, 0, -days)
totalValue float64
)
for i := range totalHours {
hourTime := since.Add(time.Duration(i) * time.Hour)
for _, m := range minutes {
measureTime := time.Date(
hourTime.Year(),
hourTime.Month(),
hourTime.Day(),
hourTime.Hour(),
m, // minutes
0, // seconds
0, // nanoseconds
time.Local,
)
measure := client.Measure{
Timestamp: uint32(measureTime.Unix()),
Value: totalValue,
}
measures = append(measures, measure)
totalValue += rand.Float64()
}
}
return measures
}
func GenerateInstantMeasures(days int, baseValue float64) []client.Measure {
var (
measures []client.Measure
minutes = []int{14, 29, 44, 59}
hoursPerDay = 24
totalHours = days * hoursPerDay
since = time.Now().AddDate(0, 0, -days)
)
for i := range totalHours {
hourTime := since.Add(time.Duration(i) * time.Hour)
for _, m := range minutes {
measureTime := time.Date(
hourTime.Year(),
hourTime.Month(),
hourTime.Day(),
hourTime.Hour(),
m, // minutes
0, // seconds
0, // nanoseconds
time.Local,
)
// value = +-10% from base value
fluctuation := baseValue * 0.1
value := baseValue + (rand.Float64()*2-1)*fluctuation
measure := client.Measure{
Timestamp: uint32(measureTime.Unix()),
Value: value,
}
measures = append(measures, measure)
}
}
return measures
}

@ -0,0 +1,90 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"gopkg.in/ini.v1"
"gordenko.dev/dima/diploma/client"
)
var (
metricTypeToName = []string{
"",
"cumulative",
"instant",
}
)
func main() {
var (
iniFileName string
)
flag.Usage = func() {
fmt.Fprint(flag.CommandLine.Output(), helpMessage)
fmt.Fprint(flag.CommandLine.Output(), configExample)
fmt.Fprintf(flag.CommandLine.Output(), mainUsage, os.Args[0])
flag.PrintDefaults()
}
flag.StringVar(&iniFileName, "c", "requests.ini", "path to *.ini config file")
flag.Parse()
config, err := loadConfig(iniFileName)
if err != nil {
log.Fatalln(err)
}
conn, err := client.Connect(config.DatabaseAddr)
if err != nil {
log.Fatalf("client.Connect(%s): %s\n", config.DatabaseAddr, err)
} else {
fmt.Println("Connected to database")
}
sendRequests(conn)
}
// CONFIG FILE
const mainUsage = `Usage:
%s -c path/to/config.ini
`
const helpMessage = `Diploma project. Example requests. Version: 1.0
created by Dmytro Gordenko, 1.e4.kc6@gmail.com
`
const configExample = `
requests.ini example:
databaseAddr = :12345
`
type Config struct {
DatabaseAddr string
}
func (s Config) String() string {
return fmt.Sprintf(`starting options:
databaseAddr = %s
`,
s.DatabaseAddr)
}
func loadConfig(iniFileName string) (_ Config, err error) {
file, err := ini.Load(iniFileName)
if err != nil {
return
}
conf := Config{}
top := file.Section("")
conf.DatabaseAddr = top.Key("databaseAddr").String()
return conf, nil
}

@ -0,0 +1,361 @@
package main
import (
"fmt"
"log"
"time"
"gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/client"
"gordenko.dev/dima/diploma/proto"
)
func sendRequests(conn *client.Connection) {
var (
instantMetricID uint32 = 10000
cumulativeMetricID uint32 = 10001
fracDigits byte = 2
err error
)
conn.DeleteMetric(instantMetricID)
conn.DeleteMetric(cumulativeMetricID)
// ADD INSTANT METRIC
err = conn.AddMetric(client.Metric{
MetricID: instantMetricID,
MetricType: diploma.Instant,
FracDigits: fracDigits,
})
if err != nil {
log.Fatalf("conn.AddMetric: %s\n", err)
} else {
fmt.Printf("\nInstant metric %d added\n", instantMetricID)
}
// GET INSTANT METRIC
iMetric, err := conn.GetMetric(instantMetricID)
if err != nil {
log.Fatalf("conn.GetMetric: %s\n", err)
} else {
fmt.Printf(`
GetMetric:
metricID: %d
metricType: %s
fracDigits: %d
`,
iMetric.MetricID, metricTypeToName[iMetric.MetricType], fracDigits)
}
// APPEND MEASURES
instantMeasures := GenerateInstantMeasures(62, 220)
err = conn.AppendMeasures(client.AppendMeasuresReq{
MetricID: instantMetricID,
Measures: instantMeasures,
})
if err != nil {
log.Fatalf("conn.AppendMeasures: %s\n", err)
} else {
fmt.Printf("\nAppended %d measures for the metric %d\n",
len(instantMeasures), instantMetricID)
}
// LIST INSTANT MEASURES
lastTimestamp := instantMeasures[len(instantMeasures)-1].Timestamp
until := time.Unix(int64(lastTimestamp), 0)
since := until.Add(-5 * time.Hour)
instantList, err := conn.ListInstantMeasures(proto.ListInstantMeasuresReq{
MetricID: instantMetricID,
Since: uint32(since.Unix()),
Until: uint32(until.Unix()),
})
if err != nil {
log.Fatalf("conn.ListInstantMeasures: %s\n", err)
} else {
fmt.Printf("\nListInstantMeasures %s - %s:\n",
formatTime(uint32(since.Unix())), formatTime(uint32(until.Unix())))
for _, item := range instantList {
fmt.Printf(" %s => %.2f\n", formatTime(item.Timestamp), item.Value)
}
}
// LIST ALL INSTANT MEASURES
instantList, err = conn.ListAllInstantMeasures(instantMetricID)
if err != nil {
log.Fatalf("conn.ListAllInstantMeasures: %s\n", err)
} else {
fmt.Printf("\nListAllInstantMeasures (last 15 items):\n")
for _, item := range instantList[:15] {
fmt.Printf(" %s => %.2f\n", formatTime(item.Timestamp), item.Value)
}
}
// LIST INSTANT PERIODS (group by hour)
until = time.Unix(int64(lastTimestamp+1), 0)
since = until.Add(-24 * time.Hour)
instantPeriods, err := conn.ListInstantPeriods(proto.ListInstantPeriodsReq{
MetricID: instantMetricID,
Since: uint32(since.Unix()),
Until: uint32(until.Unix()),
GroupBy: diploma.GroupByHour,
AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
})
if err != nil {
log.Fatalf("conn.ListInstantPeriods: %s\n", err)
} else {
fmt.Printf("\nListInstantPeriods (1 day, group by hour):\n")
for _, item := range instantPeriods {
fmt.Printf(" %s => min %.2f, max %.2f, avg %.2f\n", formatHourPeriod(item.Period), item.Min, item.Max, item.Avg)
}
}
// LIST INSTANT PERIODS (group by day)
until = time.Unix(int64(lastTimestamp+1), 0)
since = until.AddDate(0, 0, -7)
instantPeriods, err = conn.ListInstantPeriods(proto.ListInstantPeriodsReq{
MetricID: instantMetricID,
Since: uint32(since.Unix()),
Until: uint32(until.Unix()),
GroupBy: diploma.GroupByDay,
AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
})
if err != nil {
log.Fatalf("conn.ListInstantPeriods: %s\n", err)
} else {
fmt.Printf("\nListInstantPeriods (7 days, group by day):\n")
for _, item := range instantPeriods {
fmt.Printf(" %s => min %.2f, max %.2f, avg %.2f\n", formatDayPeriod(item.Period), item.Min, item.Max, item.Avg)
}
}
// LIST INSTANT PERIODS (group by month)
until = time.Unix(int64(lastTimestamp+1), 0)
since = until.AddDate(0, 0, -62)
instantPeriods, err = conn.ListInstantPeriods(proto.ListInstantPeriodsReq{
MetricID: instantMetricID,
Since: uint32(since.Unix()),
Until: uint32(until.Unix()),
GroupBy: diploma.GroupByMonth,
AggregateFuncs: diploma.AggregateMin | diploma.AggregateMax | diploma.AggregateAvg,
})
if err != nil {
log.Fatalf("conn.ListInstantPeriods: %s\n", err)
} else {
fmt.Printf("\nListInstantPeriods (62 days, group by month):\n")
for _, item := range instantPeriods {
fmt.Printf(" %s => min %.2f, max %.2f, avg %.2f\n", formatMonthPeriod(item.Period), item.Min, item.Max, item.Avg)
}
}
// DELETE INSTANT METRIC MEASURES
err = conn.DeleteMeasures(proto.DeleteMeasuresReq{
MetricID: instantMetricID,
})
if err != nil {
log.Fatalf("conn.DeleteMeasures: %s\n", err)
} else {
fmt.Printf("\nInstant metric %d measures deleted\n", instantMetricID)
}
// DELETE INSTANT METRIC
err = conn.DeleteMetric(instantMetricID)
if err != nil {
log.Fatalf("conn.DeleteMetric: %s\n", err)
} else {
fmt.Printf("\nInstant metric %d deleted\n", instantMetricID)
}
// ADD CUMULATIVE METRIC
err = conn.AddMetric(client.Metric{
MetricID: cumulativeMetricID,
MetricType: diploma.Cumulative,
FracDigits: fracDigits,
})
if err != nil {
log.Fatalf("conn.AddMetric: %s\n", err)
} else {
fmt.Printf("\nCumulative metric %d added\n", cumulativeMetricID)
}
// GET CUMULATIVE METRIC
cMetric, err := conn.GetMetric(cumulativeMetricID)
if err != nil {
log.Fatalf("conn.GetMetric: %s\n", err)
} else {
fmt.Printf(`
GetMetric:
metricID: %d
metricType: %s
fracDigits: %d
`,
cMetric.MetricID, metricTypeToName[cMetric.MetricType], fracDigits)
}
// APPEND MEASURES
cumulativeMeasures := GenerateCumulativeMeasures(62)
err = conn.AppendMeasures(client.AppendMeasuresReq{
MetricID: cumulativeMetricID,
Measures: cumulativeMeasures,
})
if err != nil {
log.Fatalf("conn.AppendMeasures: %s\n", err)
} else {
fmt.Printf("\nAppended %d measures for the metric %d\n",
len(cumulativeMeasures), cumulativeMetricID)
}
// LIST CUMULATIVE MEASURES
lastTimestamp = cumulativeMeasures[len(cumulativeMeasures)-1].Timestamp
until = time.Unix(int64(lastTimestamp), 0)
since = until.Add(-5 * time.Hour)
cumulativeList, err := conn.ListCumulativeMeasures(proto.ListCumulativeMeasuresReq{
MetricID: cumulativeMetricID,
Since: uint32(since.Unix()),
Until: uint32(until.Unix()),
})
if err != nil {
log.Fatalf("conn.ListCumulativeMeasures: %s\n", err)
} else {
fmt.Printf("\nListCumulativeMeasures %s - %s:\n",
formatTime(uint32(since.Unix())), formatTime(uint32(until.Unix())))
for _, item := range cumulativeList {
fmt.Printf(" %s => %.2f\n", formatTime(item.Timestamp), item.Value)
}
}
// LIST ALL CUMULATIVE MEASURES
cumulativeList, err = conn.ListAllCumulativeMeasures(cumulativeMetricID)
if err != nil {
log.Fatalf("conn.ListAllCumulativeMeasures: %s\n", err)
} else {
fmt.Printf("\nListAllCumulativeMeasures (last 15 items):\n")
for _, item := range cumulativeList[:15] {
fmt.Printf(" %s => %.2f\n", formatTime(item.Timestamp), item.Value)
}
}
// LIST CUMULATIVE PERIODS (group by hour)
until = time.Unix(int64(lastTimestamp+1), 0)
since = until.Add(-24 * time.Hour)
cumulativePeriods, err := conn.ListCumulativePeriods(proto.ListCumulativePeriodsReq{
MetricID: cumulativeMetricID,
Since: uint32(since.Unix()),
Until: uint32(until.Unix()),
GroupBy: diploma.GroupByHour,
})
if err != nil {
log.Fatalf("conn.ListCumulativePeriods: %s\n", err)
} else {
fmt.Printf("\nListCumulativePeriods (1 day, group by hour):\n")
for _, item := range cumulativePeriods {
fmt.Printf(" %s => end value %.2f, total %.2f\n", formatHourPeriod(item.Period), item.EndValue, item.Total)
}
}
// LIST CUMULATIVE PERIODS (group by day)
until = time.Unix(int64(lastTimestamp+1), 0)
since = until.AddDate(0, 0, -7)
cumulativePeriods, err = conn.ListCumulativePeriods(proto.ListCumulativePeriodsReq{
MetricID: cumulativeMetricID,
Since: uint32(since.Unix()),
Until: uint32(until.Unix()),
GroupBy: diploma.GroupByDay,
})
if err != nil {
log.Fatalf("conn.ListCumulativePeriods: %s\n", err)
} else {
fmt.Printf("\nListCumulativePeriods (7 days, group by day):\n")
for _, item := range cumulativePeriods {
fmt.Printf(" %s => end value %.2f, total %.2f\n", formatDayPeriod(item.Period), item.EndValue, item.Total)
}
}
// LIST CUMULATIVE PERIODS (group by month)
until = time.Unix(int64(lastTimestamp+1), 0)
since = until.AddDate(0, 0, -62)
cumulativePeriods, err = conn.ListCumulativePeriods(proto.ListCumulativePeriodsReq{
MetricID: cumulativeMetricID,
Since: uint32(since.Unix()),
Until: uint32(until.Unix()),
GroupBy: diploma.GroupByMonth,
})
if err != nil {
log.Fatalf("conn.ListCumulativePeriods: %s\n", err)
} else {
fmt.Printf("\nListCumulativePeriods (62 days, group by month):\n")
for _, item := range cumulativePeriods {
fmt.Printf(" %s => end value %.2f, total %.2f\n", formatMonthPeriod(item.Period), item.EndValue, item.Total)
}
}
// DELETE CUMULATIVE METRIC MEASURES
err = conn.DeleteMeasures(proto.DeleteMeasuresReq{
MetricID: cumulativeMetricID,
})
if err != nil {
log.Fatalf("conn.DeleteMeasures: %s\n", err)
} else {
fmt.Printf("\nCumulative metric %d measures deleted\n", cumulativeMetricID)
}
// DELETE CUMULATIVE METRIC
err = conn.DeleteMetric(cumulativeMetricID)
if err != nil {
log.Fatalf("conn.DeleteMetric: %s\n", err)
} else {
fmt.Printf("\nCumulative metric %d deleted\n", cumulativeMetricID)
}
}
const datetimeLayout = "2006-01-02 15:04:05"
func formatTime(timestamp uint32) string {
tm := time.Unix(int64(timestamp), 0)
return tm.Format(datetimeLayout)
}
func formatHourPeriod(period uint32) string {
tm := time.Unix(int64(period), 0)
return tm.Format("2006-01-02 15:00 - 15") + ":59"
}
func formatDayPeriod(period uint32) string {
tm := time.Unix(int64(period), 0)
return tm.Format("2006-01-02")
}
func formatMonthPeriod(period uint32) string {
tm := time.Unix(int64(period), 0)
return tm.Format("2006-01")
}
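For reference, illustrative outputs of these helpers for timestamp 1714566896, assuming the process runs in a UTC locale (an assumption — the helpers format in local time):
// formatTime(ts)        -> "2024-05-01 12:34:56"
// formatHourPeriod(ts)  -> "2024-05-01 12:00 - 12:59"
// formatDayPeriod(ts)   -> "2024-05-01"
// formatMonthPeriod(ts) -> "2024-05"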

@ -0,0 +1,72 @@
package freelist
import (
"fmt"
"sync"
"github.com/RoaringBitmap/roaring/v2"
)
type FreeList struct {
mutex sync.Mutex
free *roaring.Bitmap
reserved *roaring.Bitmap
}
func New() *FreeList {
return &FreeList{
free: roaring.New(),
reserved: roaring.New(),
}
}
func (s *FreeList) Restore(serialized []byte) error {
err := s.free.UnmarshalBinary(serialized)
if err != nil {
return fmt.Errorf("UnmarshalBinary: %s", err)
}
return nil
}
func (s *FreeList) AddPages(pageNumbers []uint32) {
if len(pageNumbers) == 0 {
return
}
s.mutex.Lock()
s.free.AddMany(pageNumbers)
s.mutex.Unlock()
}
// ReservePage - the allocator reserves a page but does not remove it until
// DeletePages is called: the transaction may fail, and between reserving the
// page and the transaction crash an init file may already have been created.
func (s *FreeList) ReservePage() (pageNo uint32) {
s.mutex.Lock()
defer s.mutex.Unlock()
if s.free.IsEmpty() {
return
}
pageNo = s.free.Minimum()
s.free.Remove(pageNo)
s.reserved.Add(pageNo)
return
}
// DeletePages removes previously reserved pages.
func (s *FreeList) DeletePages(pageNumbers []uint32) {
s.mutex.Lock()
for _, pageNo := range pageNumbers {
s.reserved.Remove(pageNo)
s.free.Remove(pageNo) // also needed when replaying the TransactionLog
}
s.mutex.Unlock()
}
func (s *FreeList) Serialize() ([]byte, error) {
s.mutex.Lock()
defer s.mutex.Unlock()
tmp := roaring.Or(s.free, s.reserved)
tmp.RunOptimize()
return tmp.ToBytes()
}
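A minimal usage sketch of the reserve-then-delete cycle described above; the page numbers are hypothetical:
fl := freelist.New()
fl.AddPages([]uint32{10, 11, 12}) // pages freed by earlier deletions
pageNo := fl.ReservePage() // returns the minimum free page (10 here), or 0 when empty
// ... the transaction that uses the page commits ...
fl.DeletePages([]uint32{pageNo}) // drop it from both the free and reserved sets
serialized, err := fl.Serialize() // free + reserved, suitable for a snapshot
if err == nil {
restored := freelist.New()
_ = restored.Restore(serialized) // rebuilds the free bitmap on recovery
}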

@ -0,0 +1,13 @@
module gordenko.dev/dima/diploma
go 1.24.0
require (
github.com/RoaringBitmap/roaring/v2 v2.5.0
gopkg.in/ini.v1 v1.67.0
)
require (
github.com/bits-and-blooms/bitset v1.12.0 // indirect
github.com/mschoch/smat v0.2.0 // indirect
)

@ -0,0 +1,20 @@
github.com/RoaringBitmap/roaring/v2 v2.5.0 h1:TJ45qCM7D7fIEBwKd9zhoR0/S1egfnSSIzLU1e1eYLY=
github.com/RoaringBitmap/roaring/v2 v2.5.0/go.mod h1:FiJcsfkGje/nZBZgCu0ZxCPOKD/hVXDS2dXi7/eUFE0=
github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

@ -0,0 +1,11 @@
cd examples/database
env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ../../database_linux
cd -
cd examples/loadtest
env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ../../loadtest_linux
cd -
cd examples/requests
env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ../../requests_linux
cd -

@ -0,0 +1,9 @@
# host:port
databaseAddr = :12345
# path to metrics.info file
metricsInfo = testdir/metrics.info
# the number of concurrently open connections.
connections = 1000
# number of requests sent per connection
requestsPerConn = 500
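# with these example values the test issues 1000 x 500 = 500,000 requests in total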

Binary file not shown.

Binary file not shown.

@ -0,0 +1,473 @@
package proto
import (
"fmt"
octopus "gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/bin"
"gordenko.dev/dima/diploma/bufreader"
)
const (
TypeDeleteMeasures byte = 1
TypeListCurrentValues byte = 2
TypeListInstantMeasures byte = 3
TypeListCumulativeMeasures byte = 33
TypeListInstantPeriods byte = 4
TypeListCumulativePeriods byte = 44
TypeGetMetric byte = 5
TypeAddMetric byte = 6
TypeListAllInstantMeasures byte = 8
TypeListAllCumulativeMeasures byte = 88
TypeRangeTotal byte = 9
TypeAppendMeasure byte = 10
TypeAppendMeasures byte = 11
TypeDeleteMetric byte = 12
RespPartOfValue byte = 255
RespEndOfValue byte = 254
RespError byte = 253
RespSuccess byte = 252
RespValue byte = 251
ErrNoMetric = 1
ErrDuplicate = 2
ErrWrongMetricType = 3
ErrWrongFracDigits = 4
ErrExpiredMeasure = 5
ErrNonMonotonicValue = 6
ErrEmptyMetricID = 7
ErrInvalidRange = 8
ErrUnexpected = 9
)
func ErrorCodeToText(code uint16) string {
switch code {
case ErrNoMetric:
return "NoMetric"
case ErrDuplicate:
return "Duplicate"
case ErrWrongMetricType:
return "WrongMetricType"
case ErrWrongFracDigits:
return "WrongFracDigits"
case ErrExpiredMeasure:
return "ExpiredMeasure"
case ErrNonMonotonicValue:
return "NonMonotonicValue"
case ErrEmptyMetricID:
return "EmptyMetricID"
case ErrInvalidRange:
return "InvalidRange"
case ErrUnexpected:
return "Unexpected"
default:
return ""
}
}
type GetMetricReq struct {
MetricID uint32
}
type ListCurrentValuesReq struct {
MetricIDs []uint32
}
type AddMetricReq struct {
MetricID uint32
MetricType octopus.MetricType
FracDigits int
}
type UpdateMetricReq struct {
MetricID uint32
MetricType octopus.MetricType
FracDigits int
}
type DeleteMetricReq struct {
MetricID uint32
}
type DeleteMeasuresReq struct {
MetricID uint32
Since uint32 // timestamp (optional)
}
type AppendMeasureReq struct {
MetricID uint32
Timestamp uint32
Value float64
}
type ListAllInstantMetricMeasuresReq struct {
MetricID uint32
}
type ListAllCumulativeMeasuresReq struct {
MetricID uint32
}
type ListInstantMeasuresReq struct {
MetricID uint32
Since uint32
Until uint32
FirstHourOfDay int
}
type ListCumulativeMeasuresReq struct {
MetricID uint32
Since uint32
Until uint32
FirstHourOfDay int
}
type ListInstantPeriodsReq struct {
MetricID uint32
Since uint32
Until uint32
GroupBy octopus.GroupBy
AggregateFuncs byte
FirstHourOfDay int
LastDayOfMonth int
}
type ListCumulativePeriodsReq struct {
MetricID uint32
Since uint32
Until uint32
GroupBy octopus.GroupBy
FirstHourOfDay int
LastDayOfMonth int
}
type Metric struct {
MetricID uint32
MetricType octopus.MetricType
FracDigits int
}
type RangeTotalReq struct {
MetricID uint32
Since uint32
Until uint32
}
func PackAddMetricReq(req AddMetricReq) []byte {
arr := []byte{
TypeAddMetric,
0, 0, 0, 0, // metricID
byte(req.MetricType),
byte(req.FracDigits),
}
bin.PutUint32(arr[1:], req.MetricID)
return arr
}
func PackDeleteMetricReq(req DeleteMetricReq) []byte {
arr := []byte{
TypeDeleteMetric,
0, 0, 0, 0, // metricID
}
bin.PutUint32(arr[1:], req.MetricID)
return arr
}
func PackAppendMeasure(req AppendMeasureReq) []byte {
arr := []byte{
TypeAppendMeasure,
0, 0, 0, 0, // metricID
0, 0, 0, 0, // timestamp
0, 0, 0, 0, 0, 0, 0, 0, // value
}
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint32(arr[5:], uint32(req.Timestamp))
bin.PutFloat64(arr[9:], req.Value)
return arr
}
func PackDeleteMeasuresReq(req DeleteMeasuresReq) []byte {
arr := []byte{
TypeDeleteMeasures,
0, 0, 0, 0, // metricID
0, 0, 0, 0, // since
}
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint32(arr[5:], uint32(req.Since))
return arr
}
// UNPACK reqs
func UnpackAddMetricReq(arr []byte) (m AddMetricReq) {
m.MetricID = bin.GetUint32(arr)
m.MetricType = octopus.MetricType(arr[4])
m.FracDigits = int(arr[5])
return
}
func UnpackUpdateMetricReq(arr []byte) (m UpdateMetricReq) {
m.MetricID = bin.GetUint32(arr)
m.MetricType = octopus.MetricType(arr[4])
m.FracDigits = int(arr[5])
return
}
func UnpackDeleteMetricReq(arr []byte) (m DeleteMetricReq) {
m.MetricID = bin.GetUint32(arr)
return
}
func UnpackAppendMeasureReq(arr []byte) (m AppendMeasureReq) {
m.MetricID = bin.GetUint32(arr)
m.Timestamp = bin.GetUint32(arr[4:])
m.Value = bin.GetFloat64(arr[8:])
return
}
func UnpackDeleteMeasuresReq(arr []byte) (m DeleteMeasuresReq) {
m.MetricID = bin.GetUint32(arr)
m.Since = bin.GetUint32(arr[4:])
return
}
func UnpackListInstantMeasuresReq(arr []byte) (m ListInstantMeasuresReq) {
m.MetricID = bin.GetUint32(arr[0:])
m.Since = bin.GetUint32(arr[4:])
m.Until = bin.GetUint32(arr[8:])
m.FirstHourOfDay = int(arr[12])
return
}
func UnpackListCumulativeMeasuresReq(arr []byte) (m ListCumulativeMeasuresReq) {
m.MetricID = bin.GetUint32(arr)
m.Since = bin.GetUint32(arr[4:])
m.Until = bin.GetUint32(arr[8:])
m.FirstHourOfDay = int(arr[12])
return
}
func UnpackListInstantPeriodsReq(arr []byte) (m ListInstantPeriodsReq) {
m.MetricID = bin.GetUint32(arr)
m.Since = bin.GetUint32(arr[4:])
m.Until = bin.GetUint32(arr[8:])
m.GroupBy = octopus.GroupBy(arr[12])
m.AggregateFuncs = arr[13]
m.FirstHourOfDay = int(arr[14])
m.LastDayOfMonth = int(arr[15])
return
}
func UnpackListCumulativePeriodsReq(arr []byte) (m ListCumulativePeriodsReq) {
m.MetricID = bin.GetUint32(arr[0:])
m.Since = bin.GetUint32(arr[4:])
m.Until = bin.GetUint32(arr[8:])
m.GroupBy = octopus.GroupBy(arr[12])
m.FirstHourOfDay = int(arr[13])
m.LastDayOfMonth = int(arr[14])
return
}
func UnpackRangeTotalReq(arr []byte) (m RangeTotalReq) {
m.MetricID = bin.GetUint32(arr)
m.Since = bin.GetUint32(arr[4:])
m.Until = bin.GetUint32(arr[8:])
return
}
// READ reqs
func ReadGetMetricReq(r *bufreader.BufferedReader) (m GetMetricReq, err error) {
m.MetricID, err = bin.ReadUint32(r)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return
}
func ReadAddMetricReq(r *bufreader.BufferedReader) (m AddMetricReq, err error) {
arr, err := r.ReadN(6)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return UnpackAddMetricReq(arr), nil
}
func ReadUpdateMetricReq(r *bufreader.BufferedReader) (m UpdateMetricReq, err error) {
arr, err := r.ReadN(6)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return UnpackUpdateMetricReq(arr), nil
}
func ReadDeleteMetricReq(r *bufreader.BufferedReader) (m DeleteMetricReq, err error) {
m.MetricID, err = bin.ReadUint32(r)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return
}
func ReadAppendMeasureReq(r *bufreader.BufferedReader) (m AppendMeasureReq, err error) {
arr, err := r.ReadN(16)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return UnpackAppendMeasureReq(arr), nil
}
func ReadDeleteMeasuresReq(r *bufreader.BufferedReader) (m DeleteMeasuresReq, err error) {
arr, err := r.ReadN(8)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return UnpackDeleteMeasuresReq(arr), nil
}
func ReadListAllInstantMeasuresReq(r *bufreader.BufferedReader) (m ListAllInstantMetricMeasuresReq, err error) {
m.MetricID, err = bin.ReadUint32(r)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return
}
func ReadListAllCumulativeMeasuresReq(r *bufreader.BufferedReader) (m ListAllCumulativeMeasuresReq, err error) {
m.MetricID, err = bin.ReadUint32(r)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return
}
func ReadListInstantMeasuresReq(r *bufreader.BufferedReader) (m ListInstantMeasuresReq, err error) {
arr, err := r.ReadN(13)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return UnpackListInstantMeasuresReq(arr), nil
}
func ReadListCumulativeMeasuresReq(r *bufreader.BufferedReader) (m ListCumulativeMeasuresReq, err error) {
arr, err := r.ReadN(13)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return UnpackListCumulativeMeasuresReq(arr), nil
}
func ReadListInstantPeriodsReq(r *bufreader.BufferedReader) (m ListInstantPeriodsReq, err error) {
arr, err := r.ReadN(16)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return UnpackListInstantPeriodsReq(arr), nil
}
func ReadListCumulativePeriodsReq(r *bufreader.BufferedReader) (m ListCumulativePeriodsReq, err error) {
arr, err := r.ReadN(15)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return UnpackListCumulativePeriodsReq(arr), nil
}
func ReadRangeTotalReq(r *bufreader.BufferedReader) (m RangeTotalReq, err error) {
arr, err := r.ReadN(12)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
return UnpackRangeTotalReq(arr), nil
}
func ReadListCurrentValuesReq(r *bufreader.BufferedReader) (m ListCurrentValuesReq, err error) {
qty, err := bin.ReadUint16(r)
if err != nil {
err = fmt.Errorf("read req: %s", err)
return
}
for i := range int(qty) {
var metricID uint32
metricID, err = bin.ReadUint32(r)
if err != nil {
err = fmt.Errorf("read metricID (#%d): %s", i, err)
return
}
m.MetricIDs = append(m.MetricIDs, metricID)
}
return
}
type AppendMeasuresReq struct {
MetricID uint32
Measures []Measure
}
type Measure struct {
Timestamp uint32
Value float64
}
func PackAppendMeasures(req AppendMeasuresReq) []byte {
if len(req.Measures) > 65535 {
panic(fmt.Errorf("wrong measures qty: %d", len(req.Measures)))
}
var (
prefixSize = 7
recordSize = 12
arr = make([]byte, prefixSize+len(req.Measures)*recordSize)
)
arr[0] = TypeAppendMeasures
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint16(arr[5:], uint16(len(req.Measures)))
pos := prefixSize
for _, measure := range req.Measures {
bin.PutUint32(arr[pos:], measure.Timestamp)
bin.PutFloat64(arr[pos+4:], measure.Value)
pos += recordSize
}
return arr
}
func ReadAppendMeasuresReq(r *bufreader.BufferedReader) (m AppendMeasuresReq, err error) {
prefix, err := bin.ReadN(r, 6) // metricID + measures qty
if err != nil {
err = fmt.Errorf("read prefix: %s", err)
return
}
m.MetricID = bin.GetUint32(prefix[0:])
qty := bin.GetUint16(prefix[4:])
for i := range int(qty) {
var measure Measure
measure.Timestamp, err = bin.ReadUint32(r)
if err != nil {
err = fmt.Errorf("read timestamp (#%d): %s", i, err)
return
}
measure.Value, err = bin.ReadFloat64(r)
if err != nil {
err = fmt.Errorf("read value (#%d): %s", i, err)
return
}
m.Measures = append(m.Measures, measure)
}
return
}
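For illustration, a pack/unpack round trip under the framing above; the first byte is the message type, and the Unpack helpers expect the body without it (values are arbitrary):
req := AppendMeasureReq{MetricID: 42, Timestamp: 1714566896, Value: 3.5}
packed := PackAppendMeasure(req)
// packed[0] == TypeAppendMeasure; packed[1:] is the 16-byte body
got := UnpackAppendMeasureReq(packed[1:])
fmt.Printf("%+v\n", got) // {MetricID:42 Timestamp:1714566896 Value:3.5}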

@ -0,0 +1,255 @@
package recovery
import (
"errors"
"fmt"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
)
var (
reChanges = regexp.MustCompile(`(\d+)\.changes`)
reSnapshot = regexp.MustCompile(`(\d+)\.snapshot`)
)
func joinChangesFileName(dir string, logNumber int) string {
return filepath.Join(dir, fmt.Sprintf("%d.changes", logNumber))
}
type RecoveryRecipe struct {
Snapshot string
Changes []string
LogNumber int
ToDelete []string
CompleteSnapshot bool // indicates that snapshot creation must be completed
}
type RecoveryAdvisor struct {
dir string
verifySnapshot func(string) (bool, error) // (fileName) isVerified, error
}
type RecoveryAdvisorOptions struct {
Dir string
VerifySnapshot func(string) (bool, error)
}
func NewRecoveryAdvisor(opt RecoveryAdvisorOptions) (*RecoveryAdvisor, error) {
if opt.Dir == "" {
return nil, errors.New("Dir option is required")
}
if opt.VerifySnapshot == nil {
return nil, errors.New("VerifySnapshot option is required")
}
return &RecoveryAdvisor{
dir: opt.Dir,
verifySnapshot: opt.VerifySnapshot,
}, nil
}
type SnapshotChangesPair struct {
SnapshotFileName string
ChangesFileName string
LogNumber int
}
func (s *RecoveryAdvisor) getSnapshotChangesPairs() (*SnapshotChangesPair, *SnapshotChangesPair, error) {
var (
numSet = make(map[int]bool)
changesSet = make(map[int]bool)
snapshotsSet = make(map[int]bool)
pairs []SnapshotChangesPair
)
entries, err := os.ReadDir(s.dir)
if err != nil {
return nil, nil, err
}
for _, entry := range entries {
if entry.Type().IsRegular() {
baseName := entry.Name()
groups := reChanges.FindStringSubmatch(baseName)
if len(groups) == 2 {
num, _ := strconv.Atoi(groups[1])
numSet[num] = true
changesSet[num] = true
}
groups = reSnapshot.FindStringSubmatch(baseName)
if len(groups) == 2 {
num, _ := strconv.Atoi(groups[1])
numSet[num] = true
snapshotsSet[num] = true
}
}
}
for logNumber := range numSet {
var (
snapshotFileName string
changesFileName string
)
if changesSet[logNumber] {
changesFileName = joinChangesFileName(s.dir, logNumber)
}
if snapshotsSet[logNumber] {
snapshotFileName = filepath.Join(s.dir, fmt.Sprintf("%d.snapshot", logNumber))
}
pairs = append(pairs, SnapshotChangesPair{
ChangesFileName: changesFileName,
SnapshotFileName: snapshotFileName,
LogNumber: logNumber,
})
}
if len(pairs) == 0 {
return nil, nil, nil
}
sort.Slice(pairs, func(i, j int) bool {
return pairs[i].LogNumber > pairs[j].LogNumber
})
pair := pairs[0]
if pair.ChangesFileName == "" {
return nil, nil, fmt.Errorf("has %d.shapshot file, but %d.changes file not found",
pair.LogNumber, pair.LogNumber)
}
if len(pairs) > 1 {
prevPair := pairs[1]
if prevPair.SnapshotFileName == "" && prevPair.LogNumber != 1 {
return &pair, nil, nil
}
if prevPair.ChangesFileName == "" && pair.SnapshotFileName == "" {
return &pair, nil, nil
}
return &pair, &prevPair, nil
} else {
return &pair, nil, nil
}
}
func (s *RecoveryAdvisor) GetRecipe() (*RecoveryRecipe, error) {
pair, prevPair, err := s.getSnapshotChangesPairs()
if err != nil {
return nil, err
}
if pair == nil {
return nil, nil
}
if pair.SnapshotFileName != "" {
isVerified, err := s.verifySnapshot(pair.SnapshotFileName)
if err != nil {
return nil, fmt.Errorf("verifySnapshot %s: %s",
pair.SnapshotFileName, err)
}
if isVerified {
recipe := &RecoveryRecipe{
Snapshot: pair.SnapshotFileName,
Changes: []string{
pair.ChangesFileName,
},
LogNumber: pair.LogNumber,
}
if prevPair != nil {
if prevPair.ChangesFileName != "" {
recipe.ToDelete = append(recipe.ToDelete, prevPair.ChangesFileName)
}
if prevPair.SnapshotFileName != "" {
recipe.ToDelete = append(recipe.ToDelete, prevPair.SnapshotFileName)
}
}
return recipe, nil
}
if prevPair != nil {
return s.tryPrevPair(pair, prevPair)
}
return nil, fmt.Errorf("%d.shapshot is corrupted", pair.LogNumber)
} else {
if prevPair != nil {
return s.tryPrevPair(pair, prevPair)
} else {
if pair.LogNumber == 1 {
return &RecoveryRecipe{
Changes: []string{
pair.ChangesFileName,
},
LogNumber: pair.LogNumber,
}, nil
} else {
return nil, fmt.Errorf("%d.snapshot not found", pair.LogNumber)
}
}
}
}
func (s *RecoveryAdvisor) tryPrevPair(pair, prevPair *SnapshotChangesPair) (*RecoveryRecipe, error) {
if prevPair.ChangesFileName == "" {
if pair.SnapshotFileName != "" {
return nil, fmt.Errorf("%d.shapshot is corrupted and %d.changes not found",
pair.LogNumber, prevPair.LogNumber)
} else {
return nil, fmt.Errorf("%d.changes not found", prevPair.LogNumber)
}
}
if prevPair.SnapshotFileName == "" {
if prevPair.LogNumber == 1 {
recipe := &RecoveryRecipe{
Changes: []string{
prevPair.ChangesFileName,
pair.ChangesFileName,
},
LogNumber: pair.LogNumber,
CompleteSnapshot: true,
ToDelete: []string{
prevPair.ChangesFileName,
},
}
return recipe, nil
} else {
if pair.SnapshotFileName != "" {
return nil, fmt.Errorf("%d.shapshot is corrupted and %d.snapshot not found",
pair.LogNumber, prevPair.LogNumber)
} else {
return nil, fmt.Errorf("%d.snapshot not found", pair.LogNumber)
}
}
}
isVerified, err := s.verifySnapshot(prevPair.SnapshotFileName)
if err != nil {
return nil, fmt.Errorf("verifySnapshot %s: %s",
prevPair.SnapshotFileName, err)
}
if !isVerified {
return nil, fmt.Errorf("%d.shapshot is corrupted", prevPair.LogNumber)
}
recipe := &RecoveryRecipe{
Snapshot: prevPair.SnapshotFileName,
Changes: []string{
prevPair.ChangesFileName,
pair.ChangesFileName,
},
LogNumber: pair.LogNumber,
CompleteSnapshot: true,
ToDelete: []string{
prevPair.ChangesFileName,
prevPair.SnapshotFileName,
},
}
return recipe, nil
}
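A hedged sketch of driving the advisor at startup; the directory is illustrative and the verifier is a stub that accepts every snapshot (a real one would validate the snapshot's checksum):
advisor, err := recovery.NewRecoveryAdvisor(recovery.RecoveryAdvisorOptions{
Dir: "testdir",
VerifySnapshot: func(fileName string) (bool, error) {
return true, nil // stub: accept everything
},
})
if err != nil {
log.Fatal(err)
}
recipe, err := advisor.GetRecipe()
if err != nil {
log.Fatal(err)
}
if recipe == nil {
// nothing to recover: start with an empty database
} else {
// load recipe.Snapshot (if set), replay recipe.Changes in order,
// delete recipe.ToDelete, and complete the pending snapshot when
// recipe.CompleteSnapshot is true
}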

@ -0,0 +1 @@
databaseAddr = :12345

Binary file not shown.

Binary file not shown.

@ -0,0 +1,39 @@
package timeutil
import "time"
func FirstSecondInPeriod(since time.Time, period string) (_ time.Time) {
y, m, d := since.Date()
switch period {
case "h":
h := since.Hour()
return time.Date(y, m, d, h, 0, 0, 0, time.Local)
case "d":
return time.Date(y, m, d, 0, 0, 0, 0, time.Local)
case "m":
return time.Date(y, m, 1, 0, 0, 0, 0, time.Local)
default:
return since
}
}
func LastSecondInPeriod(until time.Time, period string) (_ time.Time) {
y, m, d := until.Date()
switch period {
case "h":
h := until.Hour()
return time.Date(y, m, d, h, 59, 59, 0, time.Local)
case "d":
return time.Date(y, m, d, 23, 59, 59, 0, time.Local)
case "m":
tm := time.Date(y, m, 1, 23, 59, 59, 0, time.Local)
// add one month
tm = tm.AddDate(0, 1, 0)
// subtract one day to land on the last day of the original month
return tm.AddDate(0, 0, -1)
default:
return until
}
}
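For illustration, the "m" case expands a point in time to full month bounds (local time; the date is arbitrary):
t := time.Date(2024, time.February, 10, 8, 30, 0, 0, time.Local)
since := timeutil.FirstSecondInPeriod(t, "m") // 2024-02-01 00:00:00
until := timeutil.LastSecondInPeriod(t, "m") // 2024-02-29 23:59:59 (leap year)
_, _ = since, until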

@ -0,0 +1,346 @@
package txlog
import (
"bufio"
"bytes"
"errors"
"fmt"
"hash/crc32"
"io"
"os"
"gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/bin"
"gordenko.dev/dima/diploma/proto"
)
type Reader struct {
file *os.File
reader *bufio.Reader
}
type ReaderOptions struct {
FileName string
BufferSize int
}
func NewReader(opt ReaderOptions) (*Reader, error) {
if opt.FileName == "" {
return nil, errors.New("FileName option is required")
}
if opt.BufferSize <= 0 {
return nil, errors.New("BufferSize option is required")
}
file, err := os.Open(opt.FileName)
if err != nil {
return nil, err
}
return &Reader{
file: file,
reader: bufio.NewReaderSize(file, opt.BufferSize),
}, nil
}
func (s *Reader) Close() {
s.file.Close()
}
func (s *Reader) ReadPacket() (uint32, []any, bool, error) {
prefix := make([]byte, packetPrefixSize)
n, err := io.ReadFull(s.reader, prefix) // a plain Read could return a short prefix
if err != nil {
if err == io.EOF && n == 0 {
return 0, nil, true, nil
} else {
return 0, nil, false, fmt.Errorf("read packet prefix: %s", err)
}
}
length := bin.GetUint32(prefix[lengthIdx:])
storedCRC := bin.GetUint32(prefix[checksumIdx:])
lsn := bin.GetUint32(prefix[lsnIdx:])
body, err := bin.ReadN(s.reader, int(length))
if err != nil {
return 0, nil, false, fmt.Errorf("read packet body: %s", err)
}
hasher := crc32.NewIEEE()
hasher.Write(prefix[lsnIdx:])
hasher.Write(body)
calculatedCRC := hasher.Sum32()
if calculatedCRC != storedCRC {
return 0, nil, false, fmt.Errorf("stored CRC %d != calculated CRC %d",
storedCRC, calculatedCRC)
}
records, err := s.parseRecords(body)
if err != nil {
return 0, nil, false, err
}
return lsn, records, false, nil
}
func (s *Reader) parseRecords(body []byte) ([]any, error) {
var (
src = bytes.NewBuffer(body)
records []any
)
for {
recordType, err := src.ReadByte()
if err != nil {
if err == io.EOF {
return records, nil
}
return nil, err
}
switch recordType {
case CodeAddedMetric:
var rec AddedMetric
rec, err = s.readAddedMetric(src)
if err != nil {
return nil, err
}
records = append(records, rec)
case CodeDeletedMetric:
var rec DeletedMetric
rec, err = s.readDeletedMetric(src)
if err != nil {
return nil, err
}
records = append(records, rec)
case CodeAppendedMeasure:
var rec AppendedMeasure
rec, err = s.readAppendedMeasure(src)
if err != nil {
return nil, err
}
records = append(records, rec)
case CodeAppendedMeasures:
var rec AppendedMeasures
rec, err = s.readAppendedMeasures(src)
if err != nil {
return nil, err
}
records = append(records, rec)
case CodeAppendedMeasureWithOverflow:
var rec AppendedMeasureWithOverflow
rec, err = s.readAppendedMeasureWithOverflow(src)
if err != nil {
return nil, err
}
records = append(records, rec)
case CodeDeletedMeasures:
var rec DeletedMeasures
rec, err = s.readDeletedMeasures(src)
if err != nil {
return nil, err
}
records = append(records, rec)
default:
return nil, fmt.Errorf("unknown record type code: %d", recordType)
}
}
}
func (s *Reader) readAddedMetric(src *bytes.Buffer) (_ AddedMetric, err error) {
arr, err := bin.ReadN(src, 6)
if err != nil {
return
}
return AddedMetric{
MetricID: bin.GetUint32(arr),
MetricType: diploma.MetricType(arr[4]),
FracDigits: int(arr[5]),
}, nil
}
func (s *Reader) readDeletedMetric(src *bytes.Buffer) (_ DeletedMetric, err error) {
var rec DeletedMetric
rec.MetricID, err = bin.ReadUint32(src)
if err != nil {
return
}
// free data pages
dataQty, _, err := bin.ReadVarUint64(src)
if err != nil {
return
}
for range dataQty {
var pageNo uint32
pageNo, err = bin.ReadUint32(src)
if err != nil {
return
}
rec.FreeDataPages = append(rec.FreeDataPages, pageNo)
}
// free index pages
indexQty, _, err := bin.ReadVarUint64(src)
if err != nil {
return
}
for range indexQty {
var pageNo uint32
pageNo, err = bin.ReadUint32(src)
if err != nil {
return
}
rec.FreeIndexPages = append(rec.FreeIndexPages, pageNo)
}
return rec, nil
}
func (s *Reader) readAppendedMeasure(src *bytes.Buffer) (_ AppendedMeasure, err error) {
arr, err := bin.ReadN(src, 16)
if err != nil {
return
}
return AppendedMeasure{
MetricID: bin.GetUint32(arr[0:]),
Timestamp: bin.GetUint32(arr[4:]),
Value: bin.GetFloat64(arr[8:]),
}, nil
}
func (s *Reader) readAppendedMeasures(src *bytes.Buffer) (_ AppendedMeasures, err error) {
var rec AppendedMeasures
rec.MetricID, err = bin.ReadUint32(src)
if err != nil {
return
}
qty, err := bin.ReadUint16(src)
if err != nil {
return
}
for range qty {
var measure proto.Measure
measure.Timestamp, err = bin.ReadUint32(src)
if err != nil {
return
}
measure.Value, err = bin.ReadFloat64(src)
if err != nil {
return
}
rec.Measures = append(rec.Measures, measure)
}
return rec, nil
}
func (s *Reader) readAppendedMeasureWithOverflow(src *bytes.Buffer) (_ AppendedMeasureWithOverflow, err error) {
var (
b byte
rec AppendedMeasureWithOverflow
)
rec.MetricID, err = bin.ReadUint32(src)
if err != nil {
return
}
rec.Timestamp, err = bin.ReadUint32(src)
if err != nil {
return
}
rec.Value, err = bin.ReadFloat64(src)
if err != nil {
return
}
b, err = src.ReadByte()
if err != nil {
return
}
rec.IsDataPageReused = b == 1
rec.DataPageNo, err = bin.ReadUint32(src)
if err != nil {
return
}
b, err = src.ReadByte()
if err != nil {
return
}
if b == 1 {
rec.IsRootChanged = true
rec.RootPageNo, err = bin.ReadUint32(src)
if err != nil {
return
}
}
// index pages
indexQty, err := src.ReadByte()
if err != nil {
return
}
for range indexQty {
var pageNo uint32
pageNo, err = bin.ReadUint32(src)
if err != nil {
return
}
rec.ReusedIndexPages = append(rec.ReusedIndexPages, pageNo)
}
return rec, nil
}
func (s *Reader) readDeletedMeasures(src *bytes.Buffer) (_ DeletedMeasures, err error) {
var (
rec DeletedMeasures
)
rec.MetricID, err = bin.ReadUint32(src)
if err != nil {
return
}
// free data pages
rec.FreeDataPages, err = s.readFreePageNumbers(src)
if err != nil {
return
}
// free index pages
rec.FreeIndexPages, err = s.readFreePageNumbers(src)
if err != nil {
return
}
return rec, nil
}
// HELPERS
func (s *Reader) readFreePageNumbers(src *bytes.Buffer) ([]uint32, error) {
var freePages []uint32
qty, _, err := bin.ReadVarUint64(src)
if err != nil {
return nil, err
}
for range qty {
var pageNo uint32
pageNo, err = bin.ReadUint32(src)
if err != nil {
return nil, err
}
freePages = append(freePages, pageNo)
}
return freePages, nil
}
func (s *Reader) Seek(offset int64) error {
ret, err := s.file.Seek(offset, 0)
if err != nil {
return err
}
if ret != offset {
return fmt.Errorf("ret %d != offset %d", ret, offset)
}
return nil
}
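A reading-loop sketch built on the API above (the file name and buffer size are illustrative):
r, err := txlog.NewReader(txlog.ReaderOptions{
FileName: "testdir/2.changes",
BufferSize: 1024 * 1024,
})
if err != nil {
log.Fatal(err)
}
defer r.Close()
for {
lsn, records, eof, err := r.ReadPacket()
if err != nil {
log.Fatal(err) // a torn tail or CRC mismatch ends up here
}
if eof {
break
}
fmt.Printf("lsn=%d records=%d\n", lsn, len(records))
}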

@ -0,0 +1,507 @@
package txlog
import (
"bytes"
"errors"
"fmt"
"hash/crc32"
"os"
"path/filepath"
"sync"
octopus "gordenko.dev/dima/diploma"
"gordenko.dev/dima/diploma/bin"
"gordenko.dev/dima/diploma/proto"
)
const (
lsnSize = 4
packetPrefixSize = 12 // 4 length + 4 crc32 + 4 lsn
lengthIdx = 0
checksumIdx = 4
lsnIdx = 8
filePerm = 0770
dumpSnapshotAfterNBytes = 1024 * 1024 * 1024 // 1 GB
)
const (
CodeAddedMetric byte = 1
CodeDeletedMetric byte = 2
CodeAppendedMeasure byte = 4
CodeAppendedMeasures byte = 5
CodeAppendedMeasureWithOverflow byte = 6
CodeDeletedMeasures byte = 7
)
func JoinChangesFileName(dir string, logNumber int) string {
return filepath.Join(dir, fmt.Sprintf("%d.changes", logNumber))
}
type Changes struct {
Records []any
LogNumber int
ForceSnapshot bool
ExitWaitGroup *sync.WaitGroup
WaitCh chan struct{}
}
type Writer struct {
mutex sync.Mutex
logNumber int
dir string
file *os.File
buf *bytes.Buffer
redoFilesToDelete []string
workerReqs []any
waitCh chan struct{}
appendToWorkerQueue func(any)
lsn uint32
written int64
isExited bool
exitCh chan struct{}
waitGroup *sync.WaitGroup
signalCh chan struct{}
}
type WriterOptions struct {
Dir string
LogNumber int // log file number
AppendToWorkerQueue func(any)
ExitCh chan struct{}
WaitGroup *sync.WaitGroup
}
func NewWriter(opt WriterOptions) (*Writer, error) {
if opt.Dir == "" {
return nil, errors.New("Dir option is required")
}
if opt.AppendToWorkerQueue == nil {
return nil, errors.New("AppendToWorkerQueue option is required")
}
if opt.ExitCh == nil {
return nil, errors.New("ExitCh option is required")
}
if opt.WaitGroup == nil {
return nil, errors.New("WaitGroup option is required")
}
s := &Writer{
dir: opt.Dir,
buf: bytes.NewBuffer(nil),
appendToWorkerQueue: opt.AppendToWorkerQueue,
logNumber: opt.LogNumber,
exitCh: opt.ExitCh,
waitGroup: opt.WaitGroup,
signalCh: make(chan struct{}, 1),
}
var err error
if opt.LogNumber > 0 {
s.file, err = os.OpenFile(
JoinChangesFileName(opt.Dir, s.logNumber),
os.O_APPEND|os.O_WRONLY,
filePerm,
)
if err != nil {
return nil, err
}
} else {
s.logNumber = 1
s.file, err = os.OpenFile(
JoinChangesFileName(opt.Dir, s.logNumber),
os.O_CREATE|os.O_WRONLY,
filePerm,
)
if err != nil {
return nil, err
}
}
s.reset()
return s, nil
}
func (s *Writer) Run() {
for {
select {
case <-s.signalCh:
if err := s.flush(); err != nil {
octopus.Abort(octopus.FailedWriteToTxLog, err)
}
case <-s.exitCh:
s.exit()
return
}
}
}
func (s *Writer) reset() {
s.buf.Reset()
s.buf.Write([]byte{
0, 0, 0, 0, // packet length
0, 0, 0, 0, // crc32
0, 0, 0, 0, // lsn
})
s.redoFilesToDelete = nil
s.workerReqs = nil
s.waitCh = make(chan struct{})
}
func (s *Writer) flush() error {
s.mutex.Lock()
workerReqs := s.workerReqs
waitCh := s.waitCh
isExited := s.isExited
var exitWaitGroup *sync.WaitGroup
if s.isExited {
exitWaitGroup = s.waitGroup
}
if s.buf.Len() > packetPrefixSize {
redoFilesToDelete := s.redoFilesToDelete
s.lsn++
lsn := s.lsn
packet := make([]byte, s.buf.Len())
copy(packet, s.buf.Bytes())
s.reset()
s.written += int64(len(packet)) // packet already includes the 12-byte prefix
s.mutex.Unlock()
bin.PutUint32(packet[lengthIdx:], uint32(len(packet)-packetPrefixSize))
bin.PutUint32(packet[lsnIdx:], lsn)
bin.PutUint32(packet[checksumIdx:], crc32.ChecksumIEEE(packet[8:]))
n, err := s.file.Write(packet)
if err != nil {
return fmt.Errorf("TxLog write: %s", err)
}
if n != len(packet) {
return fmt.Errorf("TxLog written %d != packet size %d", n, len(packet))
}
if err := s.file.Sync(); err != nil {
return fmt.Errorf("TxLog sync: %s", err)
}
for _, fileName := range redoFilesToDelete {
err = os.Remove(fileName)
if err != nil {
octopus.Abort(octopus.RemoveREDOFileFailed, err)
}
}
} else {
s.waitCh = make(chan struct{})
s.mutex.Unlock()
}
var forceSnapshot bool
if s.written > dumpSnapshotAfterNBytes {
forceSnapshot = true
}
if isExited && s.written > 0 {
forceSnapshot = true
}
if forceSnapshot {
if err := s.file.Close(); err != nil {
return fmt.Errorf("close changes file: %s", err)
}
s.logNumber++
var err error
s.file, err = os.OpenFile(
JoinChangesFileName(s.dir, s.logNumber),
os.O_CREATE|os.O_WRONLY,
filePerm,
)
if err != nil {
return fmt.Errorf("create new changes file: %s", err)
}
s.written = 0
}
s.appendToWorkerQueue(Changes{
Records: workerReqs,
ForceSnapshot: forceSnapshot,
LogNumber: s.logNumber,
WaitCh: waitCh,
ExitWaitGroup: exitWaitGroup,
})
return nil
}
func (s *Writer) exit() {
s.mutex.Lock()
s.isExited = true
s.mutex.Unlock()
if err := s.flush(); err != nil {
octopus.Abort(octopus.FailedWriteToTxLog, err)
}
}
// API
type AddedMetric struct {
MetricID uint32
MetricType octopus.MetricType
FracDigits int
}
func (s *Writer) WriteAddedMetric(req AddedMetric) chan struct{} {
arr := []byte{
CodeAddedMetric,
0, 0, 0, 0, // metricID
byte(req.MetricType),
byte(req.FracDigits),
}
bin.PutUint32(arr[1:], req.MetricID)
// write the record into the batch buffer
s.mutex.Lock()
s.buf.Write(arr)
s.workerReqs = append(s.workerReqs, req)
s.mutex.Unlock()
s.sendSignal()
return s.waitCh
}
type DeletedMetric struct {
MetricID uint32
FreeDataPages []uint32
FreeIndexPages []uint32
}
func (s *Writer) WriteDeletedMetric(req DeletedMetric) chan struct{} {
arr := []byte{
CodeDeletedMetric,
0, 0, 0, 0, // metricID
}
bin.PutUint32(arr[1:], req.MetricID)
// write the record into the batch buffer
s.mutex.Lock()
defer s.mutex.Unlock()
s.buf.Write(arr)
s.packFreeDataAndIndexPages(req.FreeDataPages, req.FreeIndexPages)
s.workerReqs = append(s.workerReqs, req)
s.sendSignal()
return s.waitCh
}
type AppendedMeasure struct {
MetricID uint32
Timestamp uint32
Value float64
}
func (s *Writer) WriteAppendMeasure(req AppendedMeasure) chan struct{} {
arr := []byte{
CodeAppendedMeasure,
0, 0, 0, 0, // metricID
0, 0, 0, 0, // timestamp
0, 0, 0, 0, 0, 0, 0, 0, // value
}
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint32(arr[5:], req.Timestamp)
bin.PutFloat64(arr[9:], req.Value)
//
s.mutex.Lock()
s.buf.Write(arr)
s.workerReqs = append(s.workerReqs, req)
s.mutex.Unlock()
s.sendSignal()
return s.waitCh
}
type AppendedMeasures struct {
MetricID uint32
Measures []proto.Measure
}
type AppendedMeasuresExtended struct {
Record AppendedMeasures
HoldLock bool
}
func (s *Writer) WriteAppendMeasures(req AppendedMeasures, holdLock bool) chan struct{} {
arr := []byte{
CodeAppendedMeasures,
0, 0, 0, 0, // metricID
0, 0, // qty
}
bin.PutUint32(arr[1:], req.MetricID)
bin.PutUint16(arr[5:], uint16(len(req.Measures)))
//
s.mutex.Lock()
s.buf.Write(arr)
for _, measure := range req.Measures {
bin.WriteUint32(s.buf, measure.Timestamp)
bin.WriteFloat64(s.buf, measure.Value)
}
s.workerReqs = append(s.workerReqs, AppendedMeasuresExtended{
Record: req,
HoldLock: holdLock,
})
s.mutex.Unlock()
s.sendSignal()
return s.waitCh
}
type AppendedMeasureWithOverflow struct {
MetricID uint32
Timestamp uint32
Value float64
IsDataPageReused bool
DataPageNo uint32
IsRootChanged bool
RootPageNo uint32
ReusedIndexPages []uint32
}
type AppendedMeasureWithOverflowExtended struct {
Record AppendedMeasureWithOverflow
HoldLock bool
}
/*
Record format:
1b code
4b metricID
4b timestamp
8b value
1b isReusedDataPage
4b dataPageNo
1b isRootChanged
[4b] newRootPageNo
1b reusedIndexPages length
[N * 4b] reusedIndexPages
*/
func (s *Writer) WriteAppendedMeasureWithOverflow(req AppendedMeasureWithOverflow, redoFileName string, holdLock bool) chan struct{} {
size := 24 + len(req.ReusedIndexPages)*4
if req.IsRootChanged {
size += 4
}
tmp := make([]byte, size)
tmp[0] = CodeAppendedMeasureWithOverflow
bin.PutUint32(tmp[1:], req.MetricID)
bin.PutUint32(tmp[5:], req.Timestamp)
bin.PutFloat64(tmp[9:], req.Value)
if req.IsDataPageReused {
tmp[17] = 1
}
bin.PutUint32(tmp[18:], req.DataPageNo)
pos := 22
if req.IsRootChanged {
tmp[pos] = 1
bin.PutUint32(tmp[pos+1:], req.RootPageNo)
pos += 5
} else {
tmp[pos] = 0
pos += 1
}
tmp[pos] = byte(len(req.ReusedIndexPages))
pos += 1
for _, indexPageNo := range req.ReusedIndexPages {
bin.PutUint32(tmp[pos:], indexPageNo)
pos += 4
}
s.mutex.Lock()
s.buf.Write(tmp)
s.workerReqs = append(s.workerReqs, AppendedMeasureWithOverflowExtended{
Record: req,
HoldLock: holdLock,
})
s.redoFilesToDelete = append(s.redoFilesToDelete, redoFileName)
s.mutex.Unlock()
s.sendSignal()
return s.waitCh
}
type DeletedMeasures struct {
MetricID uint32
FreeDataPages []uint32
FreeIndexPages []uint32
}
/*
Record format:
1b code
4b metricID
1b freeDataPages length
[N * 4b] freeDataPages
1b freeIndexPages length
[N * 4b] freeIndexPages
*/
func (s *Writer) WriteDeletedMeasures(op DeletedMeasures) chan struct{} {
tmp := []byte{
CodeDeletedMeasures,
0, 0, 0, 0,
}
bin.PutUint32(tmp[1:], op.MetricID)
// write the fixed-size part
s.mutex.Lock()
s.buf.Write(tmp)
s.packFreeDataAndIndexPages(op.FreeDataPages, op.FreeIndexPages)
s.workerReqs = append(s.workerReqs, op)
s.mutex.Unlock()
s.sendSignal()
return s.waitCh
}
type DeletedMeasuresSince struct {
MetricID uint32
LastPageNo uint32
IsRootChanged bool
RootPageNo uint32
FreeDataPages []uint32
FreeIndexPages []uint32
TimestampsBuf []byte
ValuesBuf []byte
}
func (s *Writer) sendSignal() {
select {
case s.signalCh <- struct{}{}:
default:
}
}
// helper
func (s *Writer) packFreeDataAndIndexPages(freeDataPages, freeIndexPages []uint32) {
// write data page numbers
bin.WriteVarUint64(s.buf, uint64(len(freeDataPages)))
for _, dataPageNo := range freeDataPages {
bin.WriteUint32(s.buf, dataPageNo)
}
// write index page numbers
bin.WriteVarUint64(s.buf, uint64(len(freeIndexPages)))
for _, indexPageNo := range freeIndexPages {
bin.WriteUint32(s.buf, indexPageNo)
}
}
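A usage sketch of the group-commit contract, assuming writer is an initialized *Writer whose Run loop is running and whose worker closes each Changes.WaitCh after applying the flushed batch (an assumption based on the Changes struct):
waitCh := writer.WriteAppendMeasure(txlog.AppendedMeasure{
MetricID: 42,
Timestamp: 1714566896,
Value: 3.5,
})
<-waitCh // unblocks once the packet holding this record has been fsynced and applied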

@ -0,0 +1,11 @@
cd examples/database
env CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -o ../../database_windows
cd -
cd examples/loadtest
env CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -o ../../loadtest_windows
cd -
cd examples/requests
env CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -o ../../requests_windows
cd -