update to use unix time in prep for replacing TEXT field with a simple int (hoping for a perf increase)

trunk
Adam Veldhousen 3 years ago
parent b214d17ea2
commit 6ee05ebb06
Signed by: adam
GPG Key ID: 6DB29003C6DD1E4B
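
The commit keeps `started` stored as ISO8601 TEXT for now, but moves the in-memory type and the HTTP/storage API boundary to unix seconds, with a `StartedTime()` helper bridging back to `time.Time`. A minimal sketch of the interim pattern, with names taken from the diff below (the exact value of the `ISO8601` layout constant is an assumption, since its definition is not part of this commit):

```go
package main

import (
	"fmt"
	"time"
)

// ISO8601 mirrors the layout constant referenced in the diff; the exact
// format string is an assumption.
const ISO8601 = "2006-01-02T15:04:05Z"

// QueryLog now carries Started as an ISO8601 string instead of time.Time.
type QueryLog struct {
	Started string
}

// StartedTime converts the stored string back to time.Time, as the new
// helper in the diff does; a failed or empty parse yields the zero time.
func (ql QueryLog) StartedTime() time.Time {
	if ql.Started == "" {
		return time.Time{}
	}
	out, _ := time.Parse(ISO8601, ql.Started)
	return out
}

func main() {
	ql := QueryLog{Started: time.Now().UTC().Format(ISO8601)}
	// Callers that want unix seconds (the future storage format) can
	// already get them via the helper.
	fmt.Println(ql.StartedTime().Unix())
}
```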

@@ -22,7 +22,7 @@ func (dm *DomainManager) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
 	responseMessage := new(dns.Msg)
 	ql := QueryLog{
-		Started:  start.UTC(),
+		Started:  start.UTC().Format(ISO8601),
 		Protocol: w.RemoteAddr().Network(),
 		ClientIP: w.RemoteAddr().String()[:strings.LastIndex(w.RemoteAddr().String(), ":")],
 		Domain:   q.Name,
@@ -79,8 +79,17 @@ const (
 	NoAnswer = ResponseStatus("NO ANSWER")
 )
+func (ql QueryLog) StartedTime() time.Time {
+	if ql.Started == "" {
+		return time.Time{}
+	}
+	out, _ := time.Parse(ISO8601, ql.Started)
+	return out
+}
 type QueryLog struct {
-	Started  time.Time
+	Started  string
 	ClientIP string
 	Protocol string
 	Domain   string

@@ -54,7 +54,7 @@ func NewAdminHandler(c Cache, s Storage, re *RuleEngine, content fs.FS) http.Han
 	r.Use(middleware.AllowContentType("application/json; utf-8", "application/json"))
 	r.Use(middleware.SetHeader("Content-Type", "application/json; utf-8"))
-	r.Use(middleware.Timeout(time.Second * 5))
+	r.Use(middleware.Timeout(time.Second * 15))
 	r.Get("/metrics/log", RestHandler(a.getLog).ToHF())
 	r.Get("/metrics/stats", RestHandler(a.getStats).ToHF())

@@ -14,30 +14,25 @@ func (a *adminHandler) getStats(r *http.Request) (*RestResponse, error) {
 	intervalSecondsStr := q.Get("interval")
 	var err error
-	startTime := time.Now().Add(time.Hour * -86400)
-	endTime := time.Now()
+	startUnixTime := time.Now().Add(time.Second * -86400).UTC().Unix()
+	endUnixTime := time.Now().UTC().Unix()
 	if startFilter != "" {
-		var startUnixTime int64
 		if startUnixTime, err = strconv.ParseInt(startFilter, 10, strconv.IntSize); err != nil {
 			return BasicResponse(false, "start: must be a valid unix timestamp"), nil
 		}
-		startTime = time.Unix(startUnixTime, 0)
 	}
 	if endFilter != "" {
-		var endUnixTime int64
 		if endUnixTime, err = strconv.ParseInt(endFilter, 10, strconv.IntSize); err != nil {
 			return BasicResponse(false, "end: must be a valid unix timestamp"), nil
 		}
-		endTime = time.Unix(endUnixTime, 0)
 	}
 	lai := LogAggregateInput{
-		Start:  startTime,
-		End:    endTime,
+		Start:  startUnixTime,
+		End:    endUnixTime,
 		Column: key,
 	}
@@ -70,25 +65,20 @@ func (a *adminHandler) getLog(r *http.Request) (*RestResponse, error) {
 	var err error
 	var page int
 	pageSize := 25
-	startTime := time.Now().Add(time.Hour * -86400)
-	endTime := time.Now()
+	startUnixTime := time.Now().UTC().Add(time.Second * -86400).Unix()
+	endUnixTime := time.Now().UTC().Unix()
 	if startFilter != "" {
-		var startUnixTime int64
 		if startUnixTime, err = strconv.ParseInt(startFilter, 10, strconv.IntSize); err != nil {
 			return BasicResponse(false, "start: must be a valid unix timestamp"), nil
 		}
-		startTime = time.Unix(startUnixTime, 0)
 	}
 	if endFilter != "" {
-		var endUnixTime int64
 		if endUnixTime, err = strconv.ParseInt(endFilter, 10, strconv.IntSize); err != nil {
 			return BasicResponse(false, "end: must be a valid unix timestamp"), nil
 		}
-		endTime = time.Unix(endUnixTime, 0)
 	}
 	if pageStr != "" {
@@ -105,8 +95,8 @@ func (a *adminHandler) getLog(r *http.Request) (*RestResponse, error) {
 	gli := GetLogInput{
 		// Filter: filter,
-		Start: startTime,
-		End:   endTime,
+		Start: startUnixTime,
+		End:   endUnixTime,
 		Limit: pageSize,
 		Page:  page,
 	}
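
Both handlers now accept `start` and `end` as unix-second timestamps, parsed with `strconv.ParseInt` as shown above. A hedged usage sketch (the `:8000` address comes from the Makefile's dev target below; the query parameter names match the handlers' error messages, and the response shape is not part of this commit):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	// Request the last hour of query logs using unix seconds, the format
	// getLog now expects for its start/end filters.
	end := time.Now().UTC().Unix()
	start := end - 3600

	url := fmt.Sprintf("http://localhost:8000/metrics/log?start=%d&end=%d", start, end)
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```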

@@ -14,8 +14,8 @@ import (
 )
 const (
-	defaultSamples = 64
-	maxSamples     = 128
+	defaultSamples = 32
+	maxSamples     = 64
 )
 type Sqlite struct {
type Sqlite struct { type Sqlite struct {
@@ -46,19 +46,20 @@ func (ss *Sqlite) Close() error {
 }
 func (ss *Sqlite) GetLogAggregate(la LogAggregateInput) (LogAggregate, error) {
-	if la.End.IsZero() || la.End.After(time.Now()) {
-		la.End = time.Now().UTC()
+	rightNow := time.Now().UTC().Unix()
+	if la.End <= 0 || la.End > rightNow {
+		la.End = rightNow
 	}
-	if la.Start.After(la.End) {
+	if la.Start > la.End {
 		return LogAggregate{}, errors.New("Start time cannot be before end time")
 	}
-	if la.Start.IsZero() {
-		la.Start = time.Now().UTC().Add(time.Hour * -12)
+	if la.Start <= 0 {
+		la.Start = time.Now().UTC().Add(time.Hour * -12).Unix()
 	}
-	timespanSecs := int(la.End.Sub(la.Start) / time.Second)
+	timespanSecs := int(la.End - la.Start)
 	// how many data points to show on the line plot
 	sampleCount := defaultSamples
@@ -87,68 +88,76 @@ func (ss *Sqlite) GetLogAggregate(la LogAggregateInput) (LogAggregate, error) {
 		la.Column = string(Domain)
 	}
-	logs, err := ss.GetLog(GetLogInput{
-		Start: la.Start,
-		End:   la.End,
-		Limit: -1,
-		Page:  0,
-	})
-	if err != nil {
-		return LogAggregate{}, err
-	}
-	if logs.PageCount > 1 {
-		return LogAggregate{}, fmt.Errorf("more than one page available: %v", logs.PageCount)
-	}
-	buckets := map[string][]StatsDataPoint{}
-	for _, l := range logs.Logs {
-		k := GetAggregateColumnHeader(l, LogAggregateColumn(la.Column))
-		if _, ok := buckets[k]; !ok {
-			buckets[k] = make([]StatsDataPoint, sampleCount)
-		}
-		dataset := buckets[k]
-		timeIndex := int(l.Started.Sub(la.Start)/time.Second) / la.IntervalSeconds
-		if Assert(timeIndex >= len(dataset), "ERROR TIME INDEX OUT OF RANGE %d/%d with interval %d - time index: %s", timeIndex, len(dataset), la.IntervalSeconds, l.Started.Sub(la.Start)/time.Second) {
-			continue
-		}
-		ladp := dataset[timeIndex]
-		ladp.Header = k
-		offsetSecs := (timeIndex * la.IntervalSeconds)
-		ladp.Time = la.Start.Add(time.Duration(offsetSecs) * time.Second)
-		ladp.Count += 1
-		ladp.Value += float64(l.TotalTimeMs)
-		buckets[k][timeIndex] = ladp
-	}
 	laResult := LogAggregate{
 		Labels:   make([]string, sampleCount),
-		Datasets: make([]LogAggregateDataset, len(buckets)),
+		Datasets: make([]LogAggregateDataset, 0),
 	}
-	for idx := 0; idx < sampleCount; idx++ {
-		offsetSecs := (idx * la.IntervalSeconds)
-		ts := la.Start.Add(time.Duration(offsetSecs) * time.Second)
-		laResult.Labels[idx] = ts.Format("01-02 15:04:05")
-		idx := 0
-		for k, v := range buckets {
-			ladp := v[idx]
-			if ladp.Time.IsZero() {
-				v[idx].Time = ts
-			}
-			laResult.Datasets[idx].Dataset = v
-			laResult.Datasets[idx].Label = k
-			idx++
-		}
-	}
 	return laResult, nil
+	// logs, err := ss.GetLog(GetLogInput{
+	// 	Start: la.Start,
+	// 	End:   la.End,
+	// 	Limit: -1,
+	// 	Page:  0,
+	// })
+	// if err != nil {
+	// 	return LogAggregate{}, err
+	// }
+	// if logs.PageCount > 1 {
+	// 	return LogAggregate{}, fmt.Errorf("more than one page available: %v", logs.PageCount)
+	// }
+	// buckets := map[string][]StatsDataPoint{}
+	// for _, l := range logs.Logs {
+	// 	k := GetAggregateColumnHeader(l, LogAggregateColumn(la.Column))
+	// 	if _, ok := buckets[k]; !ok {
+	// 		buckets[k] = make([]StatsDataPoint, sampleCount)
+	// 	}
+	// 	dataset := buckets[k]
+	// 	timeIndex := int(l.Started.Unix()-la.Start) / la.IntervalSeconds
+	// 	if Assert(timeIndex >= len(dataset), "ERROR TIME INDEX OUT OF RANGE %d/%d with interval %d - time index: %s", timeIndex, len(dataset), la.IntervalSeconds, l.Started.Sub(time.Unix(la.Start, 0))/time.Second) {
+	// 		continue
+	// 	}
+	// 	ladp := dataset[timeIndex]
+	// 	ladp.Header = k
+	// 	offsetSecs := int64(timeIndex * la.IntervalSeconds)
+	// 	ladp.Time = time.Unix(la.Start+offsetSecs, 0)
+	// 	ladp.Count += 1
+	// 	ladp.Value += float64(l.TotalTimeMs)
+	// 	buckets[k][timeIndex] = ladp
+	// }
+	// laResult := LogAggregate{
+	// 	Labels:   make([]string, sampleCount),
+	// 	Datasets: make([]LogAggregateDataset, len(buckets)),
+	// }
+	// for idx := 0; idx < sampleCount; idx++ {
+	// 	offsetSecs := int64(idx * la.IntervalSeconds)
+	// 	ts := time.Unix(la.Start+offsetSecs, 0)
+	// 	laResult.Labels[idx] = ts.Format("01-02 15:04:05")
+	// 	idx := 0
+	// 	for k, v := range buckets {
+	// 		if idx < len(v) {
+	// 			if v[idx].Time.IsZero() {
+	// 				v[idx].Time = ts
+	// 			}
+	// 		}
+	// 		laResult.Datasets[idx].Dataset = v
+	// 		laResult.Datasets[idx].Label = k
+	// 		idx++
+	// 	}
+	// }
+	// return laResult, nil
 }
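
The (temporarily commented-out) aggregation maps each log row into one of a fixed number of interval buckets, and with unix seconds the index arithmetic reduces to plain integer subtraction and division: `timeIndex = (started - start) / intervalSeconds`. A standalone sketch of that bucketing step, with names borrowed from the diff and made-up timestamps:

```go
package main

import "fmt"

func main() {
	// A window of 3200 seconds sampled into 32 buckets of 100 seconds,
	// mirroring the timeIndex arithmetic above.
	var (
		start           int64 = 1609459200 // 2021-01-01T00:00:00Z in unix seconds
		intervalSeconds int64 = 100
		sampleCount           = 32
	)

	counts := make([]int, sampleCount)

	// Hypothetical query-log timestamps inside the window.
	events := []int64{start + 5, start + 150, start + 155, start + 3199}

	for _, started := range events {
		timeIndex := (started - start) / intervalSeconds
		if int(timeIndex) >= sampleCount { // same guard the Assert enforces
			continue
		}
		counts[timeIndex]++
	}

	fmt.Println(counts[0], counts[1], counts[31]) // 1 2 1
}
```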
type LogAggregate struct { type LogAggregate struct {
@@ -169,7 +178,7 @@ func (ss *Sqlite) Log(ql QueryLog) error {
 		(?, ?, ?, ?, ?, ?, ?, ?, ?);
 	`
 	if _, err := ss.DB.Exec(sql,
-		ql.Started.UTC().Format(ISO8601),
+		ql.Started,
 		ql.ClientIP,
 		ql.Protocol,
 		ql.Domain,
@@ -366,12 +375,12 @@ func (ss *Sqlite) GetLog(in GetLogInput) (GetLogResult, error) {
 		in.Limit = 25
 	}
-	if in.Start.IsZero() {
-		in.Start = time.Now().Add(time.Hour * -86400)
+	if in.Start == 0 {
+		in.Start = time.Now().Add(time.Hour * -86400).Unix()
 	}
-	if in.End.IsZero() {
-		in.End = time.Now()
+	if in.End == 0 {
+		in.End = time.Now().Unix()
 	}
 	glr := GetLogResult{
glr := GetLogResult{ glr := GetLogResult{
@@ -387,12 +396,11 @@ func (ss *Sqlite) GetLog(in GetLogInput) (GetLogResult, error) {
 	glr.TotalResults = lpi.Total
 	glr.PageCount = lpi.PageCount + 1
-	limitTxt := "LIMIT ?"
 	if in.Limit <= -1 {
-		limitTxt = ""
+		in.Limit = 100000
 	}
-	sql := fmt.Sprintf(`
+	sql := `
 		SELECT
 			started, clientIp, protocol, domain, totalTimeMs,
 			error, recurseRoundTripTimeMs, recurseUpstreamIp, status
@@ -408,12 +416,12 @@ func (ss *Sqlite) GetLog(in GetLogInput) (GetLogResult, error) {
 				recurseUpstreamIp,
 				status
 			FROM log
-			WHERE CAST(strftime('%%s', started) AS INTEGER) BETWEEN ? AND ?
+			WHERE CAST(strftime('%s', started) AS INTEGER) BETWEEN ? AND ?
 			ORDER BY started DESC
-		) WHERE id <= ? ORDER BY id DESC %s;
-	`, limitTxt)
+		) WHERE id <= ? ORDER BY id DESC LIMIT ?;
+	`
-	rows, err := ss.DB.Query(sql, in.Start.UTC().Format(ISO8601), in.End.UTC().Format(ISO8601), lpi.FirstItemID, in.Limit)
+	rows, err := ss.DB.Query(sql, in.Start, in.End, lpi.FirstItemID, in.Limit)
 	if err != nil {
 		return glr, fmt.Errorf("issue with GetLog sql query: %w", err)
 	}
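
With timestamps still stored as ISO8601 TEXT, the query bridges to the unix-second API by converting inside SQLite with `strftime('%s', ...)`. A minimal sketch of that conversion against an in-memory database (the `mattn/go-sqlite3` driver is an assumption; the project may use a different one):

```go
package main

import (
	"database/sql"
	"fmt"
	"time"

	_ "github.com/mattn/go-sqlite3" // driver choice is an assumption
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// started is TEXT, as in the current schema.
	if _, err := db.Exec(`CREATE TABLE log (started TEXT NOT NULL)`); err != nil {
		panic(err)
	}
	if _, err := db.Exec(`INSERT INTO log (started) VALUES ('2021-01-01T00:30:00Z')`); err != nil {
		panic(err)
	}

	// Filter with unix seconds; SQLite converts the TEXT column per row.
	start := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
	end := start + 3600
	var n int
	row := db.QueryRow(`
		SELECT COUNT(*) FROM log
		WHERE CAST(strftime('%s', started) AS INTEGER) BETWEEN ? AND ?`, start, end)
	if err := row.Scan(&n); err != nil {
		panic(err)
	}
	fmt.Println(n) // 1
}
```

Because the conversion runs per row, the BETWEEN cannot be driven by a plain index on `started`, which is part of why replacing the TEXT column with an integer is expected to help performance.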
@@ -425,10 +433,9 @@ func (ss *Sqlite) GetLog(in GetLogInput) (GetLogResult, error) {
 	for rows.Next() {
 		var q QueryLog
-		var started string
 		if err := rows.Scan(
-			&started,
+			&q.Started,
 			&q.ClientIP,
 			&q.Protocol,
 			&q.Domain,
@@ -441,10 +448,6 @@ func (ss *Sqlite) GetLog(in GetLogInput) (GetLogResult, error) {
 			return glr, fmt.Errorf("issues scanning rows: %w", err)
 		}
-		if q.Started, err = time.Parse(ISO8601, started); err != nil {
-			return glr, fmt.Errorf("could not parse time '%s': %w", started, err)
-		}
 		glr.Logs = append(glr.Logs, q)
 	}
@@ -471,15 +474,20 @@ func (ss *Sqlite) GetPagingInfo(in GetLogInput) (lpi LogPageInfo, err error) {
 	`
 	pageOffset := in.Limit * in.Page
-	row := ss.QueryRow(sql, in.Limit, pageOffset, in.Start.UTC().Format(ISO8601), in.End.UTC().Format(ISO8601))
+	row := ss.QueryRow(sql, in.Limit, pageOffset, in.Start, in.End)
 	if err = row.Scan(&lpi.Total, &lpi.PageCount, &lpi.FirstItemID); err != nil {
 		return
 	}
+	if in.Limit < 0 {
+		lpi.PageCount = 0
+	}
 	if pageOffset > lpi.Total {
 		err = errors.New("page number too high")
 	}
+	log.Printf("in: %+v, out: %+v", in, lpi)
 	return
 }
@@ -497,7 +505,7 @@ func initTable(db *sql.DB) error {
 			recurseUpStreamIP TEXT,
 			status TEXT NOT NULL
 		);
+		CREATE INDEX IF NOT EXISTS idx_log_started ON log (started);
 		CREATE TABLE IF NOT EXISTS rules (
 			id INTEGER PRIMARY KEY,
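
The new `idx_log_started` index can serve the `ORDER BY started DESC`, but the range filter in GetLog compares an expression over the column, which a plain column index cannot satisfy. Until the column becomes an INTEGER, one hedged interim option (not in this commit, and hypothetical) would be an expression index, supported by SQLite 3.9+:

```go
package main

import "fmt"

// createStartedEpochIdx is a hypothetical expression index matching the
// exact strftime() expression GetLog filters on, so SQLite could
// range-scan it while `started` is still TEXT.
const createStartedEpochIdx = `
CREATE INDEX IF NOT EXISTS idx_log_started_epoch
ON log (CAST(strftime('%s', started) AS INTEGER));`

func main() { fmt.Println(createStartedEpochIdx) }
```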

@@ -32,8 +32,8 @@ var (
 type LogAggregateInput struct {
 	IntervalSeconds int
-	Start           time.Time
-	End             time.Time
+	Start           int64
+	End             int64
 	Column          string
 }
@@ -71,11 +71,11 @@ type RecursorRow struct {
 }
 type GetLogInput struct {
-	Start        time.Time `json:"start"`
-	End          time.Time `json:"end"`
+	Start        int64     `json:"start"`
+	End          int64     `json:"end"`
 	DomainFilter string    `json:"rawfilter"`
 	Limit        int       `json:"pageSize"`
 	Page         int       `json:"page"`
 }
 type RuleRow struct {

@@ -7,7 +7,7 @@ build: clobber .bin/client/public .bin/gopherhole
 dev: clean .bin/gopherhole
 	GOPHERHOLE_UPSTREAM="1.1.1.1:53" \
 	GOPHERHOLE_MIN_TTL="60s" \
-	.bin/gopherhole -dns-address=:15353 -http-address=:8000
+	.bin/gopherhole -dns-address=:15353 -http-address=:8000 -db-path=.bin/
 client-dev:
 	docker run -it --rm --name='client-dev' \
