diff --git a/api/entry.go b/api/entry.go
index c230a5d3..84caa3c2 100644
--- a/api/entry.go
+++ b/api/entry.go
@@ -146,8 +146,8 @@ func (h *GatewayAPI) searchEntry(w http.ResponseWriter, req *http.Request, ps ht
var (
name = h.GetParameterOrDefault(req, "name", "")
queryDSL = `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
- size = h.GetIntOrDefault(req, "size", 20)
- from = h.GetIntOrDefault(req, "from", 0)
+ size = h.GetIntOrDefault(req, "size", 20)
+ from = h.GetIntOrDefault(req, "from", 0)
mustBuilder = &strings.Builder{}
)
if name != "" {
diff --git a/api/flow.go b/api/flow.go
index 435f6dee..cfa3c7ad 100644
--- a/api/flow.go
+++ b/api/flow.go
@@ -179,7 +179,7 @@ func (h *GatewayAPI) searchFlow(w http.ResponseWriter, req *http.Request, ps htt
func (h *GatewayAPI) getFlowFilters(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
- meta:=pipeline.GetFilterMetadata()
+ meta := pipeline.GetFilterMetadata()
- h.WriteJSON(w, meta,200)
+ h.WriteJSON(w, meta, 200)
}
diff --git a/cmd/echo_server/main.go b/cmd/echo_server/main.go
index 8def53a4..a472a7cf 100644
--- a/cmd/echo_server/main.go
+++ b/cmd/echo_server/main.go
@@ -35,7 +35,8 @@ import (
var port = flag.Int("port", 8080, "listening port")
var debug = flag.Bool("debug", false, "dump request")
-var name =util.PickRandomName()
+var name = util.PickRandomName()
+
func main() {
runtime.GOMAXPROCS(1)
flag.Parse()
@@ -50,7 +51,7 @@ func main() {
}
func requestHandler(ctx *fasthttp.RequestCtx) {
- if *debug{
+ if *debug {
fmt.Println(string(ctx.Request.PhantomURI().Scheme()))
fmt.Println(string(ctx.Request.PhantomURI().Host()))
fmt.Println(string(ctx.Request.PhantomURI().FullURI()))
@@ -59,10 +60,10 @@ func requestHandler(ctx *fasthttp.RequestCtx) {
fmt.Println(string(ctx.Request.PhantomURI().Hash()))
fmt.Println(string(ctx.Request.PhantomURI().Username()))
fmt.Println(string(ctx.Request.PhantomURI().Password()))
- fmt.Println(ctx.Request.Header.String(),true)
- fmt.Println(ctx.Request.GetRawBody(),true)
+ fmt.Println(ctx.Request.Header.String(), true)
+ fmt.Println(ctx.Request.GetRawBody(), true)
}
- ctx.Response.Header.Set("SERVER",name)
+ ctx.Response.Header.Set("SERVER", name)
ctx.Response.SetStatusCode(200)
fmt.Fprintf(ctx, ".")
}
diff --git a/cmd/kafka_consumer/main.go b/cmd/kafka_consumer/main.go
index 66402a33..9435866e 100644
--- a/cmd/kafka_consumer/main.go
+++ b/cmd/kafka_consumer/main.go
@@ -26,15 +26,16 @@
* Email: hello#infini.ltd */
package main
+
import (
-"context"
-"flag"
-"fmt"
-"os"
-"os/signal"
-"strings"
-
-"github.com/twmb/franz-go/pkg/kgo"
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "os/signal"
+ "strings"
+
+ "github.com/twmb/franz-go/pkg/kgo"
)
var (
@@ -143,4 +144,3 @@ func consume(cl *kgo.Client, style int) {
}
}
}
-
diff --git a/cmd/s3download/main.go b/cmd/s3download/main.go
index b0481bbf..64014e5a 100644
--- a/cmd/s3download/main.go
+++ b/cmd/s3download/main.go
@@ -76,8 +76,8 @@ func main() {
// Upload the zip file
filePath := "/Users/medcl/Downloads/coraza-waf-master.zip"
- folder:="myfolder"
- objectName := path.Join(folder,filepath.Base(filePath))
+ folder := "myfolder"
+ objectName := path.Join(folder, filepath.Base(filePath))
contentType := "application/zip"
@@ -90,4 +90,3 @@ func main() {
log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size)
}
-
diff --git a/common/model/model.go b/common/model/model.go
index 9e192d06..d04627cc 100644
--- a/common/model/model.go
+++ b/common/model/model.go
@@ -61,14 +61,14 @@ type DataFlow struct {
}
type HttpRequest struct {
- ID uint64 `json:"id,omitempty"`
- LoggingTime string `json:"timestamp,omitempty"`
- LocalIP string `json:"local_ip,omitempty"`
- RemoteIP string `json:"remote_ip,omitempty"`
- IsTLS bool `json:"tls"`
- TLSDidResume bool `json:"tls_reuse,omitempty"`
- Request *Request `json:"request,omitempty"`
- Response *Response `json:"response,omitempty"`
- DataFlow *DataFlow `json:"flow,omitempty"`
- Elastic map[string]interface{} `json:"elastic,omitempty"`
-}
\ No newline at end of file
+ ID uint64 `json:"id,omitempty"`
+ LoggingTime string `json:"timestamp,omitempty"`
+ LocalIP string `json:"local_ip,omitempty"`
+ RemoteIP string `json:"remote_ip,omitempty"`
+ IsTLS bool `json:"tls"`
+ TLSDidResume bool `json:"tls_reuse,omitempty"`
+ Request *Request `json:"request,omitempty"`
+ Response *Response `json:"response,omitempty"`
+ DataFlow *DataFlow `json:"flow,omitempty"`
+ Elastic map[string]interface{} `json:"elastic,omitempty"`
+}
diff --git a/common/role_mapping.go b/common/role_mapping.go
index 912b1704..7db17aac 100644
--- a/common/role_mapping.go
+++ b/common/role_mapping.go
@@ -26,14 +26,12 @@
package common
-
-func GetLDAPGroupsMappingRoles(str []string)[]string {
+func GetLDAPGroupsMappingRoles(str []string) []string {
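+	// NOTE: this is currently a stub; every LDAP group is mapped to the "admin" role.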
var roles []string
- roles=append(roles,"admin")
+ roles = append(roles, "admin")
return roles
}
-
type Roles struct {
ClusterAllowedRoles map[string][]Role
ClusterDeniedRoles map[string][]Role
@@ -41,7 +39,7 @@ type Roles struct {
type Role struct {
Cluster []ClusterPermission //A list of cluster privileges.
- Indices []IndexPermission //A list of indices permissions entries.
+ Indices []IndexPermission //A list of indices permissions entries.
}
type ClusterPermission struct {
@@ -50,10 +48,10 @@ type ClusterPermission struct {
}
type IndexPermission struct {
- Name []string
- Privileges []string
- FieldSecurity []string
- Query string
+ Name []string
+ Privileges []string
+ FieldSecurity []string
+ Query string
AllowRestrictedIndices string
}
diff --git a/common/strings.go b/common/strings.go
index 91f0abf8..48adbcfb 100644
--- a/common/strings.go
+++ b/common/strings.go
@@ -43,4 +43,3 @@ const CACHEABLE = "request_cacheable"
const CACHEHASH = "request_cache_hash"
var FaviconPath = []byte("/favicon.ico")
-
diff --git a/config/app.go b/config/app.go
index 6dcee4ac..aced1d59 100644
--- a/config/app.go
+++ b/config/app.go
@@ -1,7 +1,7 @@
package config
type UIConfig struct {
- Enabled bool `config:"enabled"`
+ Enabled bool `config:"enabled"`
LocalPath string `config:"path"`
LocalEnabled bool `config:"local"`
VFSEnabled bool `config:"vfs"`
diff --git a/lib/procspy/lsof.go b/lib/procspy/lsof.go
index 7f05f00f..edb6926e 100644
--- a/lib/procspy/lsof.go
+++ b/lib/procspy/lsof.go
@@ -21,12 +21,11 @@ var (
//
// For example, this is one process with two listens and one connection:
//
-// p13100
-// cmpd
-// n[::1]:6600
-// n127.0.0.1:6600
-// n[::1]:6600->[::1]:50992
-//
+// p13100
+// cmpd
+// n[::1]:6600
+// n127.0.0.1:6600
+// n[::1]:6600->[::1]:50992
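+//
+// (lsof -F field prefixes: "p" marks the PID, "c" the command name, and "n" a network address.)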
func parseLSOF(out string) (map[string]Proc, error) {
var (
res = map[string]Proc{} // Local addr -> Proc
diff --git a/main.go b/main.go
index d48673a9..fa52c1ca 100755
--- a/main.go
+++ b/main.go
@@ -60,12 +60,12 @@ func setup() {
module.RegisterSystemModule(&queue.Module{})
module.RegisterSystemModule(&task.TaskModule{})
module.RegisterSystemModule(&api.APIModule{})
- module.RegisterModuleWithPriority(&pipeline.PipeModule{},100)
+ module.RegisterModuleWithPriority(&pipeline.PipeModule{}, 100)
module.RegisterUserPlugin(forcemerge.ForceMergeModule{})
module.RegisterUserPlugin(floating_ip.FloatingIPPlugin{})
module.RegisterUserPlugin(&metrics.MetricsModule{})
- module.RegisterPluginWithPriority(&proxy.GatewayModule{},200)
+ module.RegisterPluginWithPriority(&proxy.GatewayModule{}, 200)
}
func start() {
diff --git a/pipeline/fast_bulk_indexing/bulk_indexing.go b/pipeline/fast_bulk_indexing/bulk_indexing.go
index 88001d74..08ba402b 100644
--- a/pipeline/fast_bulk_indexing/bulk_indexing.go
+++ b/pipeline/fast_bulk_indexing/bulk_indexing.go
@@ -117,16 +117,16 @@ func New(c *config.Config) (pipeline.Processor, error) {
Queues: map[string]interface{}{},
Consumer: queue.ConsumerConfig{
- Group: "group-001",
- Name: "consumer-001",
- FetchMinBytes: 1,
- FetchMaxBytes: 20 * 1024 * 1024,
- FetchMaxMessages: 500,
- EOFRetryDelayInMs: 500,
- FetchMaxWaitMs: 10000,
- ConsumeTimeoutInSeconds: 60,
- EOFMaxRetryTimes: 10,
- ClientExpiredInSeconds: 60,
+ Group: "group-001",
+ Name: "consumer-001",
+ FetchMinBytes: 1,
+ FetchMaxBytes: 20 * 1024 * 1024,
+ FetchMaxMessages: 500,
+ EOFRetryDelayInMs: 500,
+ FetchMaxWaitMs: 10000,
+ ConsumeTimeoutInSeconds: 60,
+ EOFMaxRetryTimes: 10,
+ ClientExpiredInSeconds: 60,
},
DetectActiveQueue: true,
diff --git a/pipeline/fast_flow_runner/flow_runner.go b/pipeline/fast_flow_runner/flow_runner.go
index 7a1670f1..7a44ee6e 100644
--- a/pipeline/fast_flow_runner/flow_runner.go
+++ b/pipeline/fast_flow_runner/flow_runner.go
@@ -84,16 +84,16 @@ func New(c *config.Config) (pipeline.Processor, error) {
cfg := Config{
NumOfWorkers: 1,
Consumer: queue.ConsumerConfig{
- Group: "group-001",
- Name: "consumer-001",
- FetchMinBytes: 1,
- FetchMaxBytes: 20 * 1024 * 1024,
- FetchMaxMessages: 500,
- EOFRetryDelayInMs: 500,
- FetchMaxWaitMs: 10000,
- ConsumeTimeoutInSeconds: 60,
- EOFMaxRetryTimes: 10,
- ClientExpiredInSeconds: 60,
+ Group: "group-001",
+ Name: "consumer-001",
+ FetchMinBytes: 1,
+ FetchMaxBytes: 20 * 1024 * 1024,
+ FetchMaxMessages: 500,
+ EOFRetryDelayInMs: 500,
+ FetchMaxWaitMs: 10000,
+ ConsumeTimeoutInSeconds: 60,
+ EOFMaxRetryTimes: 10,
+ ClientExpiredInSeconds: 60,
},
FlowMaxRunningTimeoutInSeconds: 60,
}
diff --git a/pipeline/flow_replay/flow_replay.go b/pipeline/flow_replay/flow_replay.go
index 5f1e0702..2b961f27 100644
--- a/pipeline/flow_replay/flow_replay.go
+++ b/pipeline/flow_replay/flow_replay.go
@@ -175,10 +175,10 @@ func (processor *FlowRunnerProcessor) Process(ctx *pipeline.Context) error {
if processor.config.KeepTags {
tags, ok := filterCtx.GetTags()
- if ok{
- ts:=[]string{}
- for _,v:=range tags{
- ts=append(ts,v)
+ if ok {
+ ts := []string{}
+ for _, v := range tags {
+ ts = append(ts, v)
}
ctx.AddTags(ts)
}
@@ -207,7 +207,6 @@ func (processor *FlowRunnerProcessor) Process(ctx *pipeline.Context) error {
releaseCtx(filterCtx)
}
-
log.Debugf("replay %v messages flow:[%v], elapsed:%v", len(messages), processor.config.FlowName, time.Since(start1))
}
diff --git a/pipeline/flow_runner/flow_runner.go b/pipeline/flow_runner/flow_runner.go
index e7691c27..a1b3b718 100644
--- a/pipeline/flow_runner/flow_runner.go
+++ b/pipeline/flow_runner/flow_runner.go
@@ -90,16 +90,16 @@ func init() {
func New(c *config.Config) (pipeline.Processor, error) {
cfg := Config{
Consumer: queue.ConsumerConfig{
- Group: "group-001",
- Name: "consumer-001",
- FetchMinBytes: 1,
- FetchMaxBytes: 20 * 1024 * 1024,
- FetchMaxMessages: 1000,
- EOFRetryDelayInMs: 500,
- FetchMaxWaitMs: 10000,
- ConsumeTimeoutInSeconds: 60,
- EOFMaxRetryTimes: 10,
- ClientExpiredInSeconds: 60,
+ Group: "group-001",
+ Name: "consumer-001",
+ FetchMinBytes: 1,
+ FetchMaxBytes: 20 * 1024 * 1024,
+ FetchMaxMessages: 1000,
+ EOFRetryDelayInMs: 500,
+ FetchMaxWaitMs: 10000,
+ ConsumeTimeoutInSeconds: 60,
+ EOFMaxRetryTimes: 10,
+ ClientExpiredInSeconds: 60,
},
SkipEmptyQueue: true,
CommitOnTag: "",
@@ -128,9 +128,9 @@ func (processor *FlowRunnerProcessor) Name() string {
func (processor *FlowRunnerProcessor) Process(ctx *pipeline.Context) error {
- if global.Env().IsDebug{
+ if global.Env().IsDebug {
log.Debugf("start flow_runner [%v]", processor.config.FlowName)
- defer log.Debugf("exit flow_runner [%v]", processor.config.FlowName)
+ defer log.Debugf("exit flow_runner [%v]", processor.config.FlowName)
}
var initOfffset queue.Offset
@@ -140,8 +140,8 @@ func (processor *FlowRunnerProcessor) Process(ctx *pipeline.Context) error {
initOfffset, _ = queue.GetOffset(qConfig, consumer)
offset = initOfffset
if processor.config.SkipEmptyQueue && !queue.ConsumerHasLag(qConfig, consumer) {
- log.Debug(processor.config.FlowName,", skip empty queue: ",qConfig.ID,",",qConfig.Name)
- time.Sleep(5*time.Second)
+ log.Debug(processor.config.FlowName, ", skip empty queue: ", qConfig.ID, ",", qConfig.Name)
+ time.Sleep(5 * time.Second)
return nil
}
@@ -175,7 +175,7 @@ func (processor *FlowRunnerProcessor) Process(ctx *pipeline.Context) error {
if !offset.Equals(initOfffset) {
ok, err := queue.CommitOffset(qConfig, consumer, offset)
- log.Debugf("%v,%v commit offset:%v, result:%v,%v", qConfig.Name, consumer.Name, offset,ok,err)
+ log.Debugf("%v,%v commit offset:%v, result:%v,%v", qConfig.Name, consumer.Name, offset, ok, err)
if !ok || err != nil {
ctx.RecordError(fmt.Errorf("failed to commit offset, ok: %v, err: %v", ok, err))
} else {
@@ -188,15 +188,15 @@ func (processor *FlowRunnerProcessor) Process(ctx *pipeline.Context) error {
defer util.ReleaseTimer(t1)
//acquire consumer
- consumerInstance, err := queue.AcquireConsumer(qConfig, consumer,ctx.ID())
- defer queue.ReleaseConsumer(qConfig, consumer,consumerInstance)
+ consumerInstance, err := queue.AcquireConsumer(qConfig, consumer, ctx.ID())
+ defer queue.ReleaseConsumer(qConfig, consumer, consumerInstance)
if err != nil || consumerInstance == nil {
panic(err)
}
ctx1 := &queue.Context{}
- ctx1.InitOffset=initOfffset
+ ctx1.InitOffset = initOfffset
lastCommitTime := time.Now()
var commitIdle = time.Duration(processor.config.CommitTimeoutInSeconds) * time.Second
@@ -221,17 +221,17 @@ func (processor *FlowRunnerProcessor) Process(ctx *pipeline.Context) error {
log.Debug(qConfig.Name, ",", consumer.Group, ",", consumer.Name, ",init offset:", offset)
}
- log.Debugf("star to consume queue:%v, %v", qConfig.Name,ctx1)
+	log.Debugf("start to consume queue:%v, %v", qConfig.Name, ctx1)
processor.config.Consumer.KeepActive()
- messages, timeout, err :=consumerInstance.FetchMessages(ctx1, processor.config.Consumer.FetchMaxMessages)
- log.Debugf("get %v messages from queue:%v, %v", len(messages), qConfig.Name,ctx1)
+ messages, timeout, err := consumerInstance.FetchMessages(ctx1, processor.config.Consumer.FetchMaxMessages)
+ log.Debugf("get %v messages from queue:%v, %v", len(messages), qConfig.Name, ctx1)
if err != nil && err.Error() != "EOF" {
log.Error(err)
panic(err)
}
- start1:=time.Now()
+ start1 := time.Now()
if len(messages) > 0 {
for _, pop := range messages {
ctx := acquireCtx()
@@ -253,7 +253,6 @@ func (processor *FlowRunnerProcessor) Process(ctx *pipeline.Context) error {
log.Tracef("end forward request to flow:%v", processor.config.FlowName)
}
-
if processor.config.CommitOnTag != "" {
tags, ok := ctx.GetTags()
if ok {
@@ -263,7 +262,7 @@ func (processor *FlowRunnerProcessor) Process(ctx *pipeline.Context) error {
//log.Error("hit commit tag:",i,"=>",ok,",",pop.NextOffset)
if !ok {
- log.Debug("not commit message, skip further processing,tags:",tags,",",ctx.Response.String())
+	log.Debug("message not committed, skipping further processing, tags:", tags, ",", ctx.Response.String())
releaseCtx(ctx)
return nil
}
@@ -287,10 +286,9 @@ func (processor *FlowRunnerProcessor) Process(ctx *pipeline.Context) error {
}
}
- log.Infof("success replay %v messages from queue:[%v,%v], elapsed:%v",len(messages), qConfig.ID,qConfig.Name, time.Since(start1))
+	log.Infof("successfully replayed %v messages from queue:[%v,%v], elapsed:%v", len(messages), qConfig.ID, qConfig.Name, time.Since(start1))
}
-
if timeout || len(messages) == 0 {
log.Debugf("exit flow_runner, [%v][%v] %v messages, timeout:%v", qConfig.Name, consumer.Name, len(messages), timeout)
return nil
diff --git a/pipeline/index_backup/segment_id.go b/pipeline/index_backup/segment_id.go
index 979238da..be5eb7b2 100644
--- a/pipeline/index_backup/segment_id.go
+++ b/pipeline/index_backup/segment_id.go
@@ -32,15 +32,15 @@ import (
"strings"
)
-func ParseSegmentID(fileName string)string {
- if util.PrefixStr(fileName,"_"){
- arr:=strings.Split(fileName,"_")
- if len(arr)>1{
- firstPart:=arr[1]
- if util.ContainStr(firstPart,"."){
- arr:=strings.Split(firstPart,".")
- if len(arr)>0{
- segmentID:=arr[0]
+func ParseSegmentID(fileName string) string {
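+	// e.g. "_3e.fdt" yields "3e" and "_3g_Lucene85FieldsIndexfile_pointers_6x" yields "3g".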
+ if util.PrefixStr(fileName, "_") {
+ arr := strings.Split(fileName, "_")
+ if len(arr) > 1 {
+ firstPart := arr[1]
+ if util.ContainStr(firstPart, ".") {
+ arr := strings.Split(firstPart, ".")
+ if len(arr) > 0 {
+ segmentID := arr[0]
return segmentID
}
}
@@ -50,18 +50,17 @@ func ParseSegmentID(fileName string)string {
return ""
}
-//The result will be:
+// The result will be:
// 0 if a==b,
-//-1 if a < b,
-//+1 if a > b.
-func CompareSegmentIDs(id1,id2 string)int {
- if len(id1)!=len(id2){
- if len(id1)>len(id2){
+// -1 if a < b,
+// +1 if a > b.
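+//
+// Length is compared first because Lucene segment IDs are base36 counters
+// without leading zeros, so a longer ID is always the larger number.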
+func CompareSegmentIDs(id1, id2 string) int {
+ if len(id1) != len(id2) {
+ if len(id1) > len(id2) {
return 1
- }else{
+ } else {
return -1
}
}
- return strings.Compare(id1,id2)
+ return strings.Compare(id1, id2)
}
-
diff --git a/pipeline/index_backup/segment_id_test.go b/pipeline/index_backup/segment_id_test.go
index bc7dde0e..5367dab7 100644
--- a/pipeline/index_backup/segment_id_test.go
+++ b/pipeline/index_backup/segment_id_test.go
@@ -34,28 +34,28 @@ import (
)
func TestUnixstamp(t *testing.T) {
- t1:=time.Now()
+ t1 := time.Now()
fmt.Println(t1.Unix())
- fmt.Println(t1.Add(time.Second*30).Unix())
+ fmt.Println(t1.Add(time.Second * 30).Unix())
fmt.Println(time.Time{}.Unix())
}
func TestParseSegmentID(t *testing.T) {
- fileName:="_3g_Lucene85FieldsIndexfile_pointers_6x"
- fileName1:="_3e.fdt"
+ fileName := "_3g_Lucene85FieldsIndexfile_pointers_6x"
+ fileName1 := "_3e.fdt"
//3g>3e
- segmentID1:=ParseSegmentID(fileName)
+ segmentID1 := ParseSegmentID(fileName)
fmt.Println(segmentID1)
- segmentID2:=ParseSegmentID(fileName1)
+ segmentID2 := ParseSegmentID(fileName1)
fmt.Println(segmentID2)
- fmt.Println(CompareSegmentIDs(segmentID1,segmentID2))
- fmt.Println(CompareSegmentIDs(segmentID2,segmentID1))
- fmt.Println(CompareSegmentIDs(segmentID2,segmentID2))
- fmt.Println(CompareSegmentIDs("12","123"))
+ fmt.Println(CompareSegmentIDs(segmentID1, segmentID2))
+ fmt.Println(CompareSegmentIDs(segmentID2, segmentID1))
+ fmt.Println(CompareSegmentIDs(segmentID2, segmentID2))
+ fmt.Println(CompareSegmentIDs("12", "123"))
}
diff --git a/pipeline/index_diff/index_diff_test.go b/pipeline/index_diff/index_diff_test.go
index 882258cd..de82fdb8 100644
--- a/pipeline/index_diff/index_diff_test.go
+++ b/pipeline/index_diff/index_diff_test.go
@@ -22,6 +22,7 @@
// along with this program. If not, see .
package index_diff
+
//
//import (
// "fmt"
diff --git a/pipeline/queue_consumer/diskqueue_consumer.go b/pipeline/queue_consumer/diskqueue_consumer.go
index c9672170..6f3f852b 100644
--- a/pipeline/queue_consumer/diskqueue_consumer.go
+++ b/pipeline/queue_consumer/diskqueue_consumer.go
@@ -44,7 +44,7 @@ import (
"infini.sh/framework/lib/fasthttp"
)
-var defaultHTTPPool=fasthttp.NewRequestResponsePool("queue_consumer")
+var defaultHTTPPool = fasthttp.NewRequestResponsePool("queue_consumer")
type DiskQueueConsumer struct {
config Config
@@ -186,10 +186,9 @@ func (processor *DiskQueueConsumer) NewBulkWorker(ctx *pipeline.Context, count *
var initOfffset queue.Offset
var offset queue.Offset
-
//acquire consumer
- consumerInstance, err := queue.AcquireConsumer(qConfig, consumer,ctx.ID())
- defer queue.ReleaseConsumer(qConfig, consumer,consumerInstance)
+ consumerInstance, err := queue.AcquireConsumer(qConfig, consumer, ctx.ID())
+ defer queue.ReleaseConsumer(qConfig, consumer, consumerInstance)
if err != nil || consumerInstance == nil {
panic(err)
@@ -227,9 +226,9 @@ READ_DOCS:
}
consumer.KeepActive()
- messages, _, err :=consumerInstance.FetchMessages(ctx1, consumer.FetchMaxMessages)
+ messages, _, err := consumerInstance.FetchMessages(ctx1, consumer.FetchMaxMessages)
- if len(messages)==0{
+ if len(messages) == 0 {
time.Sleep(time.Millisecond * time.Duration(500))
}
@@ -268,7 +267,7 @@ READ_DOCS:
offset = pop.NextOffset
}
- if !offset.Equals(initOfffset){
+ if !offset.Equals(initOfffset) {
ok, err := queue.CommitOffset(qConfig, consumer, offset)
if !ok || err != nil {
panic(err)
diff --git a/pipeline/replication_correlation/replication_correlation.go b/pipeline/replication_correlation/replication_correlation.go
index 5c4508f6..31441854 100644
--- a/pipeline/replication_correlation/replication_correlation.go
+++ b/pipeline/replication_correlation/replication_correlation.go
@@ -52,19 +52,18 @@ type Config struct {
}
type MessageRecord struct {
-
MessageOffset queue.Offset
RecordOffset string
RecordTimestamp string
- recordTime int64
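+	// recordTime caches RecordTimestamp parsed as an int64, used for commit-ordering comparisons.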
+ recordTime int64
}
type ReplicationCorrectionGroup struct {
partitionID int
- config *Config
- PreStageQueueName string `config:"pre_stage_queue"`
+ config *Config
+ PreStageQueueName string `config:"pre_stage_queue"`
//FirstStageQueueName string `config:"first_stage_queue"`
FinalStageQueueName string `config:"final_stage_queue"`
@@ -106,8 +105,8 @@ func (runner *ReplicationCorrectionProcessor) newGroup(id int) *ReplicationCorre
}
group := ReplicationCorrectionGroup{
- partitionID: id,
- PreStageQueueName: "primary_write_ahead_log" + suffix,
+ partitionID: id,
+ PreStageQueueName: "primary_write_ahead_log" + suffix,
//FirstStageQueueName: "primary_first_commit_log" + suffix,
FinalStageQueueName: "primary_final_commit_log" + suffix,
}
@@ -152,7 +151,7 @@ func (processor *ReplicationCorrectionProcessor) Name() string {
return "replication_correlation"
}
-func (processor *ReplicationCorrectionGroup) fetchMessages(ctx *pipeline.Context,tag string, consumer queue.ConsumerAPI, handler func(consumer queue.ConsumerAPI, msg []queue.Message) bool, wg *sync.WaitGroup) {
+func (processor *ReplicationCorrectionGroup) fetchMessages(ctx *pipeline.Context, tag string, consumer queue.ConsumerAPI, handler func(consumer queue.ConsumerAPI, msg []queue.Message) bool, wg *sync.WaitGroup) {
defer func() {
if !global.Env().IsDebug {
if r := recover(); r != nil {
@@ -185,7 +184,7 @@ Fetch:
panic(err)
}
- log.Debugf("get %v messages from queue: %v, %v", len(messages),tag,ctx1.String())
+ log.Debugf("get %v messages from queue: %v, %v", len(messages), tag, ctx1.String())
if len(messages) > 0 {
@@ -197,7 +196,7 @@ Fetch:
}
if len(messages) == 0 {
- time.Sleep(10*time.Second)
+ time.Sleep(10 * time.Second)
}
if global.ShuttingDown() {
@@ -221,14 +220,14 @@ func (processor *ReplicationCorrectionGroup) cleanup(uID interface{}) {
//processor.finalStageRecords.Delete(uID)
}
-var defaultHTTPPool=fasthttp.NewRequestResponsePool("replication_crc")
+var defaultHTTPPool = fasthttp.NewRequestResponsePool("replication_crc")
-func parseIDAndOffset(v string) (id, offset,timestamp string) {
+func parseIDAndOffset(v string) (id, offset, timestamp string) {
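+	// Records are encoded as "<id>#<offset>#<timestamp>".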
arr := strings.Split(v, "#")
if len(arr) != 3 {
panic("invalid message format:" + v)
}
- return arr[0], arr[1],arr[2]
+ return arr[0], arr[1], arr[2]
}
func (processor *ReplicationCorrectionProcessor) Process(ctx *pipeline.Context) error {
@@ -339,7 +338,7 @@ func (processor *ReplicationCorrectionGroup) process(ctx *pipeline.Context, w *s
finalCommitqConfig, finalCommitConsumerConfig, finalCommitLogConsumer := processor.getConsumer(processor.FinalStageQueueName)
defer queue.ReleaseConsumer(finalCommitqConfig, finalCommitConsumerConfig, finalCommitLogConsumer)
//check second stage commit
- go processor.fetchMessages(ctx,"final", finalCommitLogConsumer, func(consumer queue.ConsumerAPI, messages []queue.Message) bool {
+ go processor.fetchMessages(ctx, "final", finalCommitLogConsumer, func(consumer queue.ConsumerAPI, messages []queue.Message) bool {
processor.lastTimestampFetchedAnyMessageInFinalStage = time.Now()
@@ -356,8 +355,8 @@ func (processor *ReplicationCorrectionGroup) process(ctx *pipeline.Context, w *s
processor.lastOffsetInFinalStage = message.NextOffset.Position
v := string(message.Data)
- id, offset,timestamp := parseIDAndOffset(v)
- processor.finalStageRecords.Store(id, MessageRecord{MessageOffset:message.NextOffset,RecordOffset: offset,RecordTimestamp: timestamp})
+ id, offset, timestamp := parseIDAndOffset(v)
+ processor.finalStageRecords.Store(id, MessageRecord{MessageOffset: message.NextOffset, RecordOffset: offset, RecordTimestamp: timestamp})
}
log.Debugf("final stage message count: %v, map:%v", processor.totalMessageProcessedInFinalStage, util.GetSyncMapSize(&processor.finalStageRecords))
return true
@@ -369,7 +368,7 @@ func (processor *ReplicationCorrectionGroup) process(ctx *pipeline.Context, w *s
walCommitqConfig, walCommitConsumerConfig, WALConsumer := processor.getConsumer(processor.PreStageQueueName)
defer queue.ReleaseConsumer(walCommitqConfig, walCommitConsumerConfig, WALConsumer)
//fetch the message from the wal queue
- go processor.fetchMessages(ctx, "wal",WALConsumer, func(consumer queue.ConsumerAPI, messages []queue.Message) bool {
+ go processor.fetchMessages(ctx, "wal", WALConsumer, func(consumer queue.ConsumerAPI, messages []queue.Message) bool {
processor.lastMessageFetchedTimeInPrepareStage = time.Now()
var lastCommitableMessageOffset queue.Offset
defer func() {
@@ -457,7 +456,7 @@ func (processor *ReplicationCorrectionGroup) process(ctx *pipeline.Context, w *s
}
if retry_times > processor.config.SafetyCommitRetryTimes {
- stats.Increment("replication_crc", fmt.Sprintf("retry_times_exceed_%v",processor.config.SafetyCommitRetryTimes))
+ stats.Increment("replication_crc", fmt.Sprintf("retry_times_exceed_%v", processor.config.SafetyCommitRetryTimes))
hit = true
}
@@ -536,7 +535,7 @@ func (processor *ReplicationCorrectionGroup) process(ctx *pipeline.Context, w *s
}
wg.Done()
}()
- var commitAnywayWaitInSeconds=time.Duration(processor.config.SafetyCommitIntervalInSeconds)
+ var commitAnywayWaitInSeconds = time.Duration(processor.config.SafetyCommitIntervalInSeconds)
//first
var lastFirstCommitAbleMessageRecord MessageRecord
@@ -548,7 +547,7 @@ func (processor *ReplicationCorrectionGroup) process(ctx *pipeline.Context, w *s
var lastFinalCommit time.Time
var needCommitFinalStage bool
- for{
+ for {
if global.ShuttingDown() {
return
@@ -562,46 +561,45 @@ func (processor *ReplicationCorrectionGroup) process(ctx *pipeline.Context, w *s
return
}
- time.Sleep(10*time.Second)
+ time.Sleep(10 * time.Second)
//cleanup first log
processor.firstStageRecords.Range(func(key, value interface{}) bool {
x := value.(MessageRecord)
- msgTime,err:=util.ToInt64(x.RecordTimestamp)
- if err!=nil{
+ msgTime, err := util.ToInt64(x.RecordTimestamp)
+ if err != nil {
panic(err)
}
var commitAnyway bool
- if !(msgTime > 0 && processor.latestRecordTimestampInPrepareStage>0){
- walHasLag:=queue.ConsumerHasLag(walCommitqConfig,walCommitConsumerConfig)
- if walHasLag{
+ if !(msgTime > 0 && processor.latestRecordTimestampInPrepareStage > 0) {
+ walHasLag := queue.ConsumerHasLag(walCommitqConfig, walCommitConsumerConfig)
+ if walHasLag {
return true
- }else{
- commitAnyway=true
+ } else {
+ commitAnyway = true
}
}
- timegap:=processor.latestRecordTimestampInPrepareStage-msgTime
- if processor.totalMessageProcessedInPrepareStage > 0 && time.Since(processor.lastMessageFetchedTimeInPrepareStage) > time.Second*commitAnywayWaitInSeconds{
- if time.Since(processor.lastMessageFetchedTimeInAnyStage) > time.Second*commitAnywayWaitInSeconds{
- commitAnyway=true
+ timegap := processor.latestRecordTimestampInPrepareStage - msgTime
+ if processor.totalMessageProcessedInPrepareStage > 0 && time.Since(processor.lastMessageFetchedTimeInPrepareStage) > time.Second*commitAnywayWaitInSeconds {
+ if time.Since(processor.lastMessageFetchedTimeInAnyStage) > time.Second*commitAnywayWaitInSeconds {
+ commitAnyway = true
}
}
- if commitAnyway||timegap>processor.config.SafetyCommitIntervalInSeconds{
+ if commitAnyway || timegap > processor.config.SafetyCommitIntervalInSeconds {
//update to latest committable message
- if (commitAnyway&&x.MessageOffset.LatestThan(lastFirstCommitAbleMessageRecord.MessageOffset))||
- (msgTime> lastFirstCommitAbleMessageRecord.recordTime&&x.MessageOffset.LatestThan(lastFirstCommitAbleMessageRecord.MessageOffset)){
- x.recordTime=msgTime
- lastFirstCommitAbleMessageRecord=x
- processor.commitableMessageOffsetInFirstStage=x.MessageOffset
+ if (commitAnyway && x.MessageOffset.LatestThan(lastFirstCommitAbleMessageRecord.MessageOffset)) ||
+ (msgTime > lastFirstCommitAbleMessageRecord.recordTime && x.MessageOffset.LatestThan(lastFirstCommitAbleMessageRecord.MessageOffset)) {
+ x.recordTime = msgTime
+ lastFirstCommitAbleMessageRecord = x
+ processor.commitableMessageOffsetInFirstStage = x.MessageOffset
//needCommitFirstStage=true
- log.Debug("update first commit:",x.MessageOffset)
+ log.Debug("update first commit:", x.MessageOffset)
}
}
-
//if needCommitFirstStage{
// if time.Since(lastFirstCommit)>time.Second*10{
// log.Debug("committing first offset:",processor.commitableMessageOffsetInFirstStage)
@@ -615,53 +613,51 @@ func (processor *ReplicationCorrectionGroup) process(ctx *pipeline.Context, w *s
return true
})
-
//cleanup final log
processor.finalStageRecords.Range(func(key, value interface{}) bool {
x := value.(MessageRecord)
- msgTime,err:=util.ToInt64(x.RecordTimestamp)
- if err!=nil{
+ msgTime, err := util.ToInt64(x.RecordTimestamp)
+ if err != nil {
panic(err)
}
var commitAnyway bool
- if !(msgTime > 0 && processor.latestRecordTimestampInPrepareStage>0){
- walHasLag:=queue.ConsumerHasLag(walCommitqConfig,walCommitConsumerConfig)
- if walHasLag{
+ if !(msgTime > 0 && processor.latestRecordTimestampInPrepareStage > 0) {
+ walHasLag := queue.ConsumerHasLag(walCommitqConfig, walCommitConsumerConfig)
+ if walHasLag {
return true
- }else{
- commitAnyway=true
+ } else {
+ commitAnyway = true
}
}
- timegap:=processor.latestRecordTimestampInPrepareStage-msgTime
- if processor.totalMessageProcessedInPrepareStage > 0 && time.Since(processor.lastMessageFetchedTimeInPrepareStage) > time.Second*commitAnywayWaitInSeconds{
- if time.Since(processor.lastMessageFetchedTimeInAnyStage) > time.Second*commitAnywayWaitInSeconds{
- commitAnyway=true
+ timegap := processor.latestRecordTimestampInPrepareStage - msgTime
+ if processor.totalMessageProcessedInPrepareStage > 0 && time.Since(processor.lastMessageFetchedTimeInPrepareStage) > time.Second*commitAnywayWaitInSeconds {
+ if time.Since(processor.lastMessageFetchedTimeInAnyStage) > time.Second*commitAnywayWaitInSeconds {
+ commitAnyway = true
}
}
- if commitAnyway||timegap>processor.config.SafetyCommitIntervalInSeconds{
+ if commitAnyway || timegap > processor.config.SafetyCommitIntervalInSeconds {
//update to latest committable message
- if (commitAnyway&&x.MessageOffset.LatestThan(lastFinalCommitAbleMessageRecord.MessageOffset))||
- (msgTime> lastFinalCommitAbleMessageRecord.recordTime&&x.MessageOffset.LatestThan(lastFinalCommitAbleMessageRecord.MessageOffset)){
- x.recordTime=msgTime
- lastFinalCommitAbleMessageRecord=x
- processor.commitableMessageOffsetInFinalStage=x.MessageOffset
- needCommitFinalStage=true
- log.Debug("update final commit:",x.MessageOffset)
+ if (commitAnyway && x.MessageOffset.LatestThan(lastFinalCommitAbleMessageRecord.MessageOffset)) ||
+ (msgTime > lastFinalCommitAbleMessageRecord.recordTime && x.MessageOffset.LatestThan(lastFinalCommitAbleMessageRecord.MessageOffset)) {
+ x.recordTime = msgTime
+ lastFinalCommitAbleMessageRecord = x
+ processor.commitableMessageOffsetInFinalStage = x.MessageOffset
+ needCommitFinalStage = true
+ log.Debug("update final commit:", x.MessageOffset)
}
}
-
- if needCommitFinalStage{
- if time.Since(lastFinalCommit)>time.Second*10{
- log.Debug("committing final offset:",processor.commitableMessageOffsetInFinalStage)
+ if needCommitFinalStage {
+ if time.Since(lastFinalCommit) > time.Second*10 {
+ log.Debug("committing final offset:", processor.commitableMessageOffsetInFinalStage)
finalCommitLogConsumer.CommitOffset(processor.commitableMessageOffsetInFinalStage)
- lastFinalCommit=time.Now()
- needCommitFinalStage=false
- timegap1:=msgTime- lastFinalCommitAbleMessageRecord.recordTime
- log.Trace(x.RecordTimestamp,",",x.RecordOffset,",time_gap: ",timegap,"s, ",timegap1,"s, record:",msgTime," vs latest:",processor.latestRecordTimestampInPrepareStage,", updating to commit:",x.MessageOffset,lastFinalCommitAbleMessageRecord,",",processor.config.SafetyCommitIntervalInSeconds)
+ lastFinalCommit = time.Now()
+ needCommitFinalStage = false
+ timegap1 := msgTime - lastFinalCommitAbleMessageRecord.recordTime
+ log.Trace(x.RecordTimestamp, ",", x.RecordOffset, ",time_gap: ", timegap, "s, ", timegap1, "s, record:", msgTime, " vs latest:", processor.latestRecordTimestampInPrepareStage, ", updating to commit:", x.MessageOffset, lastFinalCommitAbleMessageRecord, ",", processor.config.SafetyCommitIntervalInSeconds)
}
}
return true
@@ -702,7 +698,7 @@ func (processor *ReplicationCorrectionGroup) getConsumer(queueName string) (*que
qConfig := queue.GetOrInitConfig(queueName)
cConfig := queue.GetOrInitConsumerConfig(qConfig.ID, "crc", "name1")
consumer, err := queue.AcquireConsumer(qConfig,
- cConfig,"worker_id")
+ cConfig, "worker_id")
if err != nil {
panic(err)
}
diff --git a/proxy/api.go b/proxy/api.go
index 7de47465..c834c2d9 100644
--- a/proxy/api.go
+++ b/proxy/api.go
@@ -48,10 +48,10 @@ func (this *GatewayModule) getConfig(w http.ResponseWriter, req *http.Request, p
v, ok := this.entryPoints[id]
if ok {
cfg := v.GetConfig()
- data:=util.MapStr{
- "entry":cfg,
- "router":v.GetRouterConfig(),
- "flows":common.GetAllFlows(),
+ data := util.MapStr{
+ "entry": cfg,
+ "router": v.GetRouterConfig(),
+ "flows": common.GetAllFlows(),
}
this.WriteJSON(w, data, 200)
diff --git a/proxy/entry/entry.go b/proxy/entry/entry.go
index 4e59ab56..7b63077b 100644
--- a/proxy/entry/entry.go
+++ b/proxy/entry/entry.go
@@ -67,7 +67,7 @@ type Entrypoint struct {
rootCert *x509.Certificate
rootKey *rsa.PrivateKey
rootCertPEM []byte
- schema string
+ schema string
listenAddress string
router *r.Router
server *fasthttp.Server
@@ -96,7 +96,7 @@ func (this *Entrypoint) Start() error {
var ln net.Listener
var err error
- if this.config.NetworkConfig.ReusePort&&!strings.Contains(this.listenAddress,"::") {
+ if this.config.NetworkConfig.ReusePort && !strings.Contains(this.listenAddress, "::") {
log.Debug("reuse port ", this.listenAddress)
ln, err = reuseport.Listen("tcp4", this.config.NetworkConfig.GetBindingAddr())
} else {
@@ -104,7 +104,7 @@ func (this *Entrypoint) Start() error {
}
if err != nil {
- panic(errors.Errorf("error in listener(%v): %s", this.listenAddress,err))
+ panic(errors.Errorf("error in listener(%v): %s", this.listenAddress, err))
}
this.router = r.New()
@@ -116,15 +116,15 @@ func (this *Entrypoint) Start() error {
if len(this.routerConfig.Rules) > 0 {
for _, rule := range this.routerConfig.Rules {
- if this.routerConfig.RuleToggleEnabled && !rule.Enabled{
+ if this.routerConfig.RuleToggleEnabled && !rule.Enabled {
continue
}
flow := common.FilterFlow{}
for _, y := range rule.Flow {
- cfg,err := common.GetFlowConfig(y)
- if err!=nil{
+ cfg, err := common.GetFlowConfig(y)
+ if err != nil {
panic(err)
}
@@ -152,9 +152,9 @@ func (this *Entrypoint) Start() error {
if this.routerConfig.DefaultFlow != "" {
this.router.DefaultFlow = this.routerConfig.DefaultFlow
- if this.router.DefaultFlow!=""{
+ if this.router.DefaultFlow != "" {
//init func
- this.router.NotFound=common.GetFlowProcess(this.router.DefaultFlow)
+ this.router.NotFound = common.GetFlowProcess(this.router.DefaultFlow)
}
} else {
this.router.NotFound = func(ctx *fasthttp.RequestCtx) {
@@ -188,7 +188,7 @@ func (this *Entrypoint) Start() error {
}
if this.config.TCPKeepaliveSeconds <= 0 {
- this.config.TCPKeepaliveSeconds = 15*60
+ this.config.TCPKeepaliveSeconds = 15 * 60
}
if this.config.WriteTimeout <= 0 {
@@ -216,12 +216,12 @@ func (this *Entrypoint) Start() error {
}
this.server = &fasthttp.Server{
- Name: "INFINI",
- NoDefaultServerHeader: true,
- NoDefaultDate: true,
- NoDefaultContentType: true,
- DisableHeaderNamesNormalizing: true,
- DisablePreParseMultipartForm: true,
+ Name: "INFINI",
+ NoDefaultServerHeader: true,
+ NoDefaultDate: true,
+ NoDefaultContentType: true,
+ DisableHeaderNamesNormalizing: true,
+ DisablePreParseMultipartForm: true,
//CloseOnShutdown: true, //TODO
//StreamRequestBody: true, //TODO
Handler: this.router.Handler,
@@ -244,14 +244,14 @@ func (this *Entrypoint) Start() error {
MaxConnsPerIP: this.config.MaxConnsPerIP,
}
- if this.routerConfig.IPAccessRules.Enabled&&len(this.routerConfig.IPAccessRules.ClientIP.DeniedList) > 0 {
+ if this.routerConfig.IPAccessRules.Enabled && len(this.routerConfig.IPAccessRules.ClientIP.DeniedList) > 0 {
log.Tracef("adding %v client ip to denied list", len(this.routerConfig.IPAccessRules.ClientIP.DeniedList))
for _, ip := range this.routerConfig.IPAccessRules.ClientIP.DeniedList {
this.server.AddBlackIPList(ip)
}
}
- if this.routerConfig.IPAccessRules.Enabled&&len(this.routerConfig.IPAccessRules.ClientIP.PermittedList) > 0 {
+ if this.routerConfig.IPAccessRules.Enabled && len(this.routerConfig.IPAccessRules.ClientIP.PermittedList) > 0 {
log.Tracef("adding %v client ip to permitted list", len(this.routerConfig.IPAccessRules.ClientIP.PermittedList))
for _, ip := range this.routerConfig.IPAccessRules.ClientIP.PermittedList {
this.server.AddWhiteIPList(ip)
@@ -270,7 +270,7 @@ func (this *Entrypoint) Start() error {
SessionTicketsDisabled: false,
// ClientAuth: tls.RequireAndVerifyClientCert,
// ClientCAs: caCertPool,
- ClientSessionCache: tls.NewLRUClientSessionCache(this.config.TLSConfig.ClientSessionCacheSize),
+ ClientSessionCache: tls.NewLRUClientSessionCache(this.config.TLSConfig.ClientSessionCacheSize),
CipherSuites: []uint16{
//tls.TLS_AES_128_GCM_SHA256,
//tls.TLS_AES_256_GCM_SHA384,
@@ -292,15 +292,14 @@ func (this *Entrypoint) Start() error {
}
var ca, cert, key string
- cert=this.config.TLSConfig.TLSCertFile
- key=this.config.TLSConfig.TLSKeyFile
+ cert = this.config.TLSConfig.TLSCertFile
+ key = this.config.TLSConfig.TLSKeyFile
log.Trace("using tls connection")
if cert != "" && key != "" {
log.Debug("using pre-defined cert files")
-
} else {
ca = path.Join(global.Env().GetDataDir(), "certs", "root.cert")
cert = path.Join(global.Env().GetDataDir(), "certs", "auto.cert")
@@ -414,7 +413,7 @@ func (this *Entrypoint) Start() error {
panic(err)
}
- stats.RegisterStats(fmt.Sprintf("entry.%v.open_connections",this.GetNameOrID()), func() interface{} {
+ stats.RegisterStats(fmt.Sprintf("entry.%v.open_connections", this.GetNameOrID()), func() interface{} {
return this.server.GetOpenConnectionsCount()
})
@@ -423,24 +422,23 @@ func (this *Entrypoint) Start() error {
return nil
}
-func (this *Entrypoint) GetNameOrID()string{
- if this.config.Name!=""{
+func (this *Entrypoint) GetNameOrID() string {
+ if this.config.Name != "" {
return this.config.Name
- }else if this.config.ID!=""{
+ } else if this.config.ID != "" {
return this.config.ID
- }else{
+ } else {
return "undefined"
}
}
-
-func (this *Entrypoint) GetSchema()string{
- if this.schema!=""{
+func (this *Entrypoint) GetSchema() string {
+ if this.schema != "" {
return this.schema
}
- if this.config.TLSConfig.TLSEnabled{
+ if this.config.TLSConfig.TLSEnabled {
return "https://"
- }else{
+ } else {
return "http://"
}
}
@@ -454,20 +452,20 @@ func (this *Entrypoint) GetRouterConfig() common.RouterConfig {
}
func (this *Entrypoint) GetFlows() map[string]common.FilterFlow {
- cfgs:=map[string]common.FilterFlow{}
+ cfgs := map[string]common.FilterFlow{}
- defaultFlow,err:=common.GetFlow(this.routerConfig.DefaultFlow)
- if err!=nil{
+ defaultFlow, err := common.GetFlow(this.routerConfig.DefaultFlow)
+ if err != nil {
panic(err)
}
- cfgs[this.routerConfig.DefaultFlow]=defaultFlow
+ cfgs[this.routerConfig.DefaultFlow] = defaultFlow
- if this.routerConfig.TracingFlow!=""{
- tracingFlow,err:=common.GetFlow(this.routerConfig.TracingFlow)
- if err!=nil{
+ if this.routerConfig.TracingFlow != "" {
+ tracingFlow, err := common.GetFlow(this.routerConfig.TracingFlow)
+ if err != nil {
panic(err)
}
- cfgs[this.routerConfig.TracingFlow]=tracingFlow
+ cfgs[this.routerConfig.TracingFlow] = tracingFlow
}
return cfgs
@@ -530,23 +528,24 @@ func (this *Entrypoint) Stop() error {
}
}()
- if r := recover(); r != nil {}
- ticker := time.NewTicker(3*time.Second)
+ if r := recover(); r != nil {
+ }
+ ticker := time.NewTicker(3 * time.Second)
for {
select {
case <-ticker.C:
- time.Sleep(1*time.Second)
- if util.ContainStr(this.listenAddress,"0.0.0.0"){
- this.listenAddress=strings.Replace(this.listenAddress,"0.0.0.0","127.0.0.1",-1)
+ time.Sleep(1 * time.Second)
+ if util.ContainStr(this.listenAddress, "0.0.0.0") {
+ this.listenAddress = strings.Replace(this.listenAddress, "0.0.0.0", "127.0.0.1", -1)
}
- util.HttpGet(this.GetSchema()+this.listenAddress+"/")
+ util.HttpGet(this.GetSchema() + this.listenAddress + "/")
case <-ctx.Done():
return
}
}
}(ctx)
- if this.server!=nil{
+ if this.server != nil {
this.server.Shutdown()
}
}
diff --git a/proxy/entry/entry_test.go b/proxy/entry/entry_test.go
index 94c91b33..8ba3d47a 100644
--- a/proxy/entry/entry_test.go
+++ b/proxy/entry/entry_test.go
@@ -44,7 +44,7 @@ func TestMulti(t *testing.T) {
panic(err)
}
- err= entry.Stop()
+ err = entry.Stop()
if err != nil {
panic(err)
}
diff --git a/proxy/filters/debug/echo/echo.go b/proxy/filters/debug/echo/echo.go
index 5805e8e1..2b74598c 100644
--- a/proxy/filters/debug/echo/echo.go
+++ b/proxy/filters/debug/echo/echo.go
@@ -94,7 +94,7 @@ func (filter *Echo) Filter(ctx *fasthttp.RequestCtx) {
if filter.template != nil {
str = filter.template.ExecuteFuncString(func(w io.Writer, tag string) (int, error) {
variable, err := ctx.GetValue(tag)
- if err==nil{
+ if err == nil {
return w.Write([]byte(util.ToString(variable)))
}
return -1, err
diff --git a/proxy/filters/debug/echo/echo_test.go b/proxy/filters/debug/echo/echo_test.go
index 4bd8ea81..07d998dc 100644
--- a/proxy/filters/debug/echo/echo_test.go
+++ b/proxy/filters/debug/echo/echo_test.go
@@ -38,5 +38,4 @@ func TestExtractFieldWithTags(t *testing.T) {
results1 := pipeline.ExtractFilterMetadata(echo)
fmt.Println(string(util.MustToJSONBytes(results1)))
-
-}
\ No newline at end of file
+}
diff --git a/proxy/filters/debug/tag/tag.go b/proxy/filters/debug/tag/tag.go
index d7467e5b..ee366061 100644
--- a/proxy/filters/debug/tag/tag.go
+++ b/proxy/filters/debug/tag/tag.go
@@ -34,8 +34,8 @@ import (
)
type Tag struct {
- AddTags []string `config:"add" `
- RemoveTags []string `config:"remove" `
+ AddTags []string `config:"add" `
+ RemoveTags []string `config:"remove" `
}
func init() {
@@ -44,8 +44,7 @@ func init() {
func New(c *config.Config) (pipeline.Filter, error) {
- runner := Tag{
- }
+ runner := Tag{}
if err := c.Unpack(&runner); err != nil {
return nil, fmt.Errorf("failed to unpack the filter configuration : %s", err)
@@ -58,11 +57,10 @@ func (filter *Tag) Name() string {
return "tag"
}
-
func (filter *Tag) Filter(ctx *fasthttp.RequestCtx) {
- if len(filter.AddTags)>0||len(filter.RemoveTags)>0{
- ctx.UpdateTags(filter.AddTags,filter.RemoveTags)
- }
+ if len(filter.AddTags) > 0 || len(filter.RemoveTags) > 0 {
+ ctx.UpdateTags(filter.AddTags, filter.RemoveTags)
+ }
}
diff --git a/proxy/filters/elastic/auto_generate_doc_id.go b/proxy/filters/elastic/auto_generate_doc_id.go
index 7201aeff..0a29bca6 100644
--- a/proxy/filters/elastic/auto_generate_doc_id.go
+++ b/proxy/filters/elastic/auto_generate_doc_id.go
@@ -60,40 +60,38 @@ func (filter *AutoGenerateDocID) Name() string {
return "auto_generate_doc_id"
}
-
func (filter *AutoGenerateDocID) Filter(ctx *fasthttp.RequestCtx) {
- path:=string(ctx.PhantomURI().Path())
- valid,indexPath,typePath,idPath := ParseURLMeta(path)
- if global.Env().IsDebug{
- log.Tracef("auto_generate_doc_id: %v => %v, %v, %v, %v",path,valid,indexPath,typePath,idPath)
+ path := string(ctx.PhantomURI().Path())
+ valid, indexPath, typePath, idPath := ParseURLMeta(path)
+ if global.Env().IsDebug {
+ log.Tracef("auto_generate_doc_id: %v => %v, %v, %v, %v", path, valid, indexPath, typePath, idPath)
}
- if valid{
- if idPath==""{
- idPath=util.GetUUID()
- if filter.Prefix!=""{
- idPath=filter.Prefix+idPath
+ if valid {
+ if idPath == "" {
+ idPath = util.GetUUID()
+ if filter.Prefix != "" {
+ idPath = filter.Prefix + idPath
}
- ctx.Request.Header.Set("X-Generated-ID",idPath)
- ctx.Response.Header.Set("X-Generated-ID",idPath)
+ ctx.Request.Header.Set("X-Generated-ID", idPath)
+ ctx.Response.Header.Set("X-Generated-ID", idPath)
}
uri := ctx.Request.CloneURI()
- uri.SetPath(fmt.Sprintf("/%s/%s/%s",indexPath,typePath,idPath))
+ uri.SetPath(fmt.Sprintf("/%s/%s/%s", indexPath, typePath, idPath))
ctx.Request.SetURI(uri)
fasthttp.ReleaseURI(uri)
}
}
-
-func ParseURLMeta(pathStr string) (valid bool,urlLevelIndex, urlLevelType,id string) {
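+// For example, "/index/_doc/" parses to (true, "index", "_doc", "") and
+// "/index/_doc/1" to (true, "index", "_doc", "1"); bulk/search paths such
+// as "/index/_doc/_bulk" are rejected.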
+func ParseURLMeta(pathStr string) (valid bool, urlLevelIndex, urlLevelType, id string) {
if strings.Index(pathStr, "//") >= 0 {
pathStr = strings.ReplaceAll(pathStr, "//", "/")
}
if strings.LastIndex(pathStr, "/") == 0 {
- return false,urlLevelIndex, urlLevelType,id
+ return false, urlLevelIndex, urlLevelType, id
}
if util.SuffixStr(pathStr, "/") {
@@ -105,8 +103,8 @@ func ParseURLMeta(pathStr string) (valid bool,urlLevelIndex, urlLevelType,id str
last := pathArray[len(pathArray)-1]
//only _doc and _create are valid for create new doc
- if util.PrefixStr(last,"_") && !util.ContainsAnyInArray(last, []string{"_create","_doc"}) {
- return false,urlLevelIndex, urlLevelType,id
+ if util.PrefixStr(last, "_") && !util.ContainsAnyInArray(last, []string{"_create", "_doc"}) {
+ return false, urlLevelIndex, urlLevelType, id
}
switch len(pathArray) {
@@ -126,12 +124,12 @@ func ParseURLMeta(pathStr string) (valid bool,urlLevelIndex, urlLevelType,id str
break
case 2:
urlLevelIndex = pathArray[1]
- return false,urlLevelIndex, urlLevelType,id
+ return false, urlLevelIndex, urlLevelType, id
}
if util.SuffixStr(urlLevelIndex, "_") {
- return false,urlLevelIndex, urlLevelType,id
+ return false, urlLevelIndex, urlLevelType, id
}
- return true,urlLevelIndex, urlLevelType,id
+ return true, urlLevelIndex, urlLevelType, id
}
diff --git a/proxy/filters/elastic/auto_generate_doc_id_test.go b/proxy/filters/elastic/auto_generate_doc_id_test.go
index 4c86d0f8..2be319e6 100644
--- a/proxy/filters/elastic/auto_generate_doc_id_test.go
+++ b/proxy/filters/elastic/auto_generate_doc_id_test.go
@@ -39,71 +39,68 @@ func TestAutoGenerateDocID(t *testing.T) {
//PUT twitter/tweet/1/_create
//POST twitter/tweet/
- path:="/index/doc/"
- valid,urlLevelIndex, urlLevelType,urlLevelID := ParseURLMeta(path)
- assert.Equal(t,true,valid)
- assert.Equal(t,"index",urlLevelIndex)
- assert.Equal(t,"doc",urlLevelType)
- assert.Equal(t,"",urlLevelID)
-
-
- path="/index/doc"
- valid,urlLevelIndex, urlLevelType,urlLevelID = ParseURLMeta(path)
- assert.Equal(t,true,valid)
- assert.Equal(t,"index",urlLevelIndex)
- assert.Equal(t,"doc",urlLevelType)
- assert.Equal(t,"",urlLevelID)
-
- path="/index/_doc"
- valid,urlLevelIndex, urlLevelType,urlLevelID = ParseURLMeta(path)
- assert.Equal(t,true,valid)
- assert.Equal(t,"index",urlLevelIndex)
- assert.Equal(t,"_doc",urlLevelType)
- assert.Equal(t,"",urlLevelID)
-
- path="/index/_doc/"
- valid,urlLevelIndex, urlLevelType,urlLevelID = ParseURLMeta(path)
- assert.Equal(t,true,valid)
- assert.Equal(t,"index",urlLevelIndex)
- assert.Equal(t,"_doc",urlLevelType)
- assert.Equal(t,"",urlLevelID)
-
- path="/index/doc/1"
- valid,urlLevelIndex, urlLevelType,urlLevelID = ParseURLMeta(path)
- assert.Equal(t,true,valid)
- assert.Equal(t,"index",urlLevelIndex)
- assert.Equal(t,"doc",urlLevelType)
- assert.Equal(t,"1",urlLevelID)
-
- path="/index/_doc/1"
- valid,urlLevelIndex, urlLevelType,urlLevelID = ParseURLMeta(path)
- assert.Equal(t,true,valid)
- assert.Equal(t,"index",urlLevelIndex)
- assert.Equal(t,"_doc",urlLevelType)
- assert.Equal(t,"1",urlLevelID)
-
- path="/index/_create/1"
- valid,urlLevelIndex, urlLevelType,urlLevelID = ParseURLMeta(path)
- assert.Equal(t,true,valid)
- assert.Equal(t,"index",urlLevelIndex)
- assert.Equal(t,"_create",urlLevelType)
- assert.Equal(t,"1",urlLevelID)
-
- path="/index/_doc/1"
- valid,urlLevelIndex, urlLevelType,urlLevelID = ParseURLMeta(path)
- assert.Equal(t,true,valid)
- assert.Equal(t,"index",urlLevelIndex)
- assert.Equal(t,"_doc",urlLevelType)
- assert.Equal(t,"1",urlLevelID)
-
- path="/index/_doc/_bulk"
- valid,urlLevelIndex, urlLevelType,urlLevelID = ParseURLMeta(path)
- assert.Equal(t,false,valid)
-
- path="/index/_doc/_search"
- valid,urlLevelIndex, urlLevelType,urlLevelID = ParseURLMeta(path)
- assert.Equal(t,false,valid)
-
-
+ path := "/index/doc/"
+ valid, urlLevelIndex, urlLevelType, urlLevelID := ParseURLMeta(path)
+ assert.Equal(t, true, valid)
+ assert.Equal(t, "index", urlLevelIndex)
+ assert.Equal(t, "doc", urlLevelType)
+ assert.Equal(t, "", urlLevelID)
+
+ path = "/index/doc"
+ valid, urlLevelIndex, urlLevelType, urlLevelID = ParseURLMeta(path)
+ assert.Equal(t, true, valid)
+ assert.Equal(t, "index", urlLevelIndex)
+ assert.Equal(t, "doc", urlLevelType)
+ assert.Equal(t, "", urlLevelID)
+
+ path = "/index/_doc"
+ valid, urlLevelIndex, urlLevelType, urlLevelID = ParseURLMeta(path)
+ assert.Equal(t, true, valid)
+ assert.Equal(t, "index", urlLevelIndex)
+ assert.Equal(t, "_doc", urlLevelType)
+ assert.Equal(t, "", urlLevelID)
+
+ path = "/index/_doc/"
+ valid, urlLevelIndex, urlLevelType, urlLevelID = ParseURLMeta(path)
+ assert.Equal(t, true, valid)
+ assert.Equal(t, "index", urlLevelIndex)
+ assert.Equal(t, "_doc", urlLevelType)
+ assert.Equal(t, "", urlLevelID)
+
+ path = "/index/doc/1"
+ valid, urlLevelIndex, urlLevelType, urlLevelID = ParseURLMeta(path)
+ assert.Equal(t, true, valid)
+ assert.Equal(t, "index", urlLevelIndex)
+ assert.Equal(t, "doc", urlLevelType)
+ assert.Equal(t, "1", urlLevelID)
+
+ path = "/index/_doc/1"
+ valid, urlLevelIndex, urlLevelType, urlLevelID = ParseURLMeta(path)
+ assert.Equal(t, true, valid)
+ assert.Equal(t, "index", urlLevelIndex)
+ assert.Equal(t, "_doc", urlLevelType)
+ assert.Equal(t, "1", urlLevelID)
+
+ path = "/index/_create/1"
+ valid, urlLevelIndex, urlLevelType, urlLevelID = ParseURLMeta(path)
+ assert.Equal(t, true, valid)
+ assert.Equal(t, "index", urlLevelIndex)
+ assert.Equal(t, "_create", urlLevelType)
+ assert.Equal(t, "1", urlLevelID)
+
+ path = "/index/_doc/1"
+ valid, urlLevelIndex, urlLevelType, urlLevelID = ParseURLMeta(path)
+ assert.Equal(t, true, valid)
+ assert.Equal(t, "index", urlLevelIndex)
+ assert.Equal(t, "_doc", urlLevelType)
+ assert.Equal(t, "1", urlLevelID)
+
+ path = "/index/_doc/_bulk"
+ valid, urlLevelIndex, urlLevelType, urlLevelID = ParseURLMeta(path)
+ assert.Equal(t, false, valid)
+
+ path = "/index/_doc/_search"
+ valid, urlLevelIndex, urlLevelType, urlLevelID = ParseURLMeta(path)
+ assert.Equal(t, false, valid)
}
diff --git a/proxy/filters/elastic/bulk_request_mutate.go b/proxy/filters/elastic/bulk_request_mutate.go
index e53bf382..9ee63da9 100644
--- a/proxy/filters/elastic/bulk_request_mutate.go
+++ b/proxy/filters/elastic/bulk_request_mutate.go
@@ -198,7 +198,7 @@ func (this *ElasticsearchBulkRequestMutate) Filter(ctx *fasthttp.RequestCtx) {
elastic.SafetyAddNewlineBetweenData(bulkBuff, payloadBytes)
}
}
- },nil)
+ }, nil)
if err != nil {
log.Errorf("processing: %v docs, err: %v", docCount, err)
diff --git a/proxy/filters/elastic/bulk_request_resort.go b/proxy/filters/elastic/bulk_request_resort.go
index 16687f88..3cc19c96 100644
--- a/proxy/filters/elastic/bulk_request_resort.go
+++ b/proxy/filters/elastic/bulk_request_resort.go
@@ -119,7 +119,7 @@ func NewBulkRequestResort(c *config.Config) (pipeline.Filter, error) {
runner.OutputQueue.Labels["elasticsearch"] = runner.Elasticsearch
}
- runner.bulkBufferPool=elastic.NewBulkBufferPool("bulk_request_resort",1024*1024*1024,100000)
+ runner.bulkBufferPool = elastic.NewBulkBufferPool("bulk_request_resort", 1024*1024*1024, 100000)
runner.idleTimeout = util.GetDurationOrDefault(runner.IdleTimeoutInSeconds, 10*time.Second)
runner.commitTimeout = util.GetDurationOrDefault(runner.CommitConfig.CommitInterval, 10*time.Second)
@@ -341,7 +341,7 @@ func (filter *BulkRequestResort) Filter(ctx *fasthttp.RequestCtx) {
for partitionID, versions := range docs {
- if len(versions) >1 {
+ if len(versions) > 1 {
//resort again
SortDocumentsByVersion(versions)
}
@@ -427,35 +427,35 @@ func (s *Sorter) run() {
//for _, doc := range docs {
- offset := doc.ThisMessageOffset.Position
- if latestEarlyCommitOffset == -1 || offset < latestEarlyCommitOffset {
- latestEarlyCommitOffset = offset
- }
+ offset := doc.ThisMessageOffset.Position
+ if latestEarlyCommitOffset == -1 || offset < latestEarlyCommitOffset {
+ latestEarlyCommitOffset = offset
+ }
- if lastCommitableOffset == -1 || offset > lastCommitableOffset {
- lastCommitableOffset = offset
- }
+ if lastCommitableOffset == -1 || offset > lastCommitableOffset {
+ lastCommitableOffset = offset
+ }
- v, ok := latestVersions[doc.Path]
- if ok {
- if v >= doc.Version {
- } else {
- latestVersions[doc.Path] = doc.Version
- }
+ v, ok := latestVersions[doc.Path]
+ if ok {
+ if v >= doc.Version {
} else {
latestVersions[doc.Path] = doc.Version
}
+ } else {
+ latestVersions[doc.Path] = doc.Version
+ }
- //add to bulk buffer
- bulkBuffer.WriteMessageID(doc.Path)
+ //add to bulk buffer
+ bulkBuffer.WriteMessageID(doc.Path)
- for _, b := range doc.Payload {
- bulkBuffer.WriteNewByteBufferLine("success", b)
- }
+ for _, b := range doc.Payload {
+ bulkBuffer.WriteNewByteBufferLine("success", b)
+ }
//}
}
hit = true
- }else {
+ } else {
time.Sleep(1 * time.Second)
//log.Error("time.sleep 1s")
}
@@ -475,10 +475,9 @@ func (s *Sorter) run() {
//if it is ok to submit and commit
if mustCommitAndExit || bulkBuffer.GetMessageCount() > 0 &&
- (
- bulkBuffer.GetMessageCount() > s.filter.BatchSizeInDocs ||
- bulkBuffer.GetMessageSize() > s.filter.batchSizeInBytes ||
- util.Since(lastCommitTime) > s.filter.commitTimeout) {
+ (bulkBuffer.GetMessageCount() > s.filter.BatchSizeInDocs ||
+ bulkBuffer.GetMessageSize() > s.filter.batchSizeInBytes ||
+ util.Since(lastCommitTime) > s.filter.commitTimeout) {
if bulkBuffer.GetMessageCount() > 0 {
requests := []queue.ProduceRequest{}
@@ -568,13 +567,13 @@ func (filter *BulkRequestResort) NewDocumentBuffer(partitionID int, queueName st
// Add appends the given documents to the buffer
func (b *DocumentBuffer) Add(docs []elastic.VersionInfo) {
- RETRY:
+RETRY:
if b.docsCount.Load() > int64(b.maxBufferSize) {
time.Sleep(1 * time.Second)
//log.Error("time.sleep 1s")
//log.Error("buffer full, drop docs")
- if !global.ShuttingDown(){
+ if !global.ShuttingDown() {
goto RETRY
}
}
@@ -608,12 +607,12 @@ func (b *DocumentBuffer) Add(docs []elastic.VersionInfo) {
// GetDocuments returns the oldest documents from the channel, reading at most the specified count
func (b *DocumentBuffer) GetDocuments(count int) (int, []elastic.VersionInfo) {
- predictDocs:=int(b.docsCount.Load()) - count
- if predictDocs< b.minBufferSize {
+ predictDocs := int(b.docsCount.Load()) - count
+ if predictDocs < b.minBufferSize {
if util.Since(b.lastWriteTime) < b.idleTimeout {
return 0, []elastic.VersionInfo{}
}
- if b.docsCount.Load()==0{
+ if b.docsCount.Load() == 0 {
return 0, []elastic.VersionInfo{}
}
}
@@ -632,9 +631,9 @@ func (b *DocumentBuffer) GetDocuments(count int) (int, []elastic.VersionInfo) {
case docs := <-b.documents:
//doc := docs.Docs
//if len(docs) > 0 {
- //b.docs.Delete(doc[0].Path) //DELETE map after popup, may unlock the map
- docsToCleanup = append(docsToCleanup, docs)
- docsCountToCleanup += 1//len(docs)
+ //b.docs.Delete(doc[0].Path) //DELETE map after popup, may unlock the map
+ docsToCleanup = append(docsToCleanup, docs)
+ docsCountToCleanup += 1 //len(docs)
//}
if docsCountToCleanup >= count {
goto READ
@@ -726,10 +725,10 @@ READ:
//log.Error("total adding back docs:", len(docsToKeep))
//if len(docsToCleanup)>b.minBufferSize{
- SortDocumentsByTime(docsToCleanup)
+ SortDocumentsByTime(docsToCleanup)
//}
- removedDocs=len(docsToCleanup)
+ removedDocs = len(docsToCleanup)
b.lastToKeep = docsToKeep
//if len(docsToKeep) > 0 {
// for _, v := range docsToKeep {
diff --git a/proxy/filters/elastic/bulk_reshuffle.go b/proxy/filters/elastic/bulk_reshuffle.go
index eddf3e04..9ddd247d 100644
--- a/proxy/filters/elastic/bulk_reshuffle.go
+++ b/proxy/filters/elastic/bulk_reshuffle.go
@@ -402,13 +402,13 @@ func (this *BulkReshuffle) Filter(ctx *fasthttp.RequestCtx) {
panic(errors.Error("queue key can't be nil"))
}
- var skipInit=false
+ var skipInit = false
cfg1, ok := queue.SmartGetConfig(queueKey)
if ok && len(cfg1.Labels) > 0 {
- _,ok:=cfg1.Labels["type"] //check label bulk_reshuffle exists
- if ok{
+ _, ok := cfg1.Labels["type"] //check label bulk_reshuffle exists
+ if ok {
queueConfig = cfg1
- skipInit=true
+ skipInit = true
}
}
@@ -455,8 +455,8 @@ func (this *BulkReshuffle) Filter(ctx *fasthttp.RequestCtx) {
if !ok {
if this.config.BufferPoolEnabled {
buff = this.docBufferPool.Get()
- }else{
- buff=&bytebufferpool.ByteBuffer{}
+ } else {
+ buff = &bytebufferpool.ByteBuffer{}
}
docBuf[queueConfig.Name] = buff
}
@@ -474,8 +474,8 @@ func (this *BulkReshuffle) Filter(ctx *fasthttp.RequestCtx) {
if !ok {
if this.config.BufferPoolEnabled {
buff = this.docBufferPool.Get()
- }else{
- buff=&bytebufferpool.ByteBuffer{}
+ } else {
+ buff = &bytebufferpool.ByteBuffer{}
}
docBuf[queueConfig.Name] = buff
}
diff --git a/proxy/filters/elastic/bulk_reshuffle_test.go b/proxy/filters/elastic/bulk_reshuffle_test.go
index 57d74743..38f7fb03 100644
--- a/proxy/filters/elastic/bulk_reshuffle_test.go
+++ b/proxy/filters/elastic/bulk_reshuffle_test.go
@@ -57,18 +57,18 @@ import (
//}
func TestParseBulkRequestWithDelete(t *testing.T) {
- data:=[]byte("{\"delete\":{\"_index\":\"idx-familycloud-stdfile2\",\"_id\":\"1411aX3240ge17520221106010809oh0\",\"routing\":\"ab1daa0979a64f32994a81c0091b1577\"}}\n{ \"create\" : { \"_index\" : \"my_index\", \"_id\" : \"2\"} }\n{ \"field\" : \"value2\", \"home_location\": \"41.12,-71.34\"}")
+ data := []byte("{\"delete\":{\"_index\":\"idx-familycloud-stdfile2\",\"_id\":\"1411aX3240ge17520221106010809oh0\",\"routing\":\"ab1daa0979a64f32994a81c0091b1577\"}}\n{ \"create\" : { \"_index\" : \"my_index\", \"_id\" : \"2\"} }\n{ \"field\" : \"value2\", \"home_location\": \"41.12,-71.34\"}")
fmt.Println(string(data))
}
func TestParseBulkRequestWithOnlyDelete(t *testing.T) {
- data:=[]byte("{\"delete\":{\"_index\":\"idx-familycloud-stdfile2\",\"_id\":\"1411aX3240ge17520221106010809oh0\",\"routing\":\"ab1daa0979a64f32994a81c0091b1577\"}}\n")
+ data := []byte("{\"delete\":{\"_index\":\"idx-familycloud-stdfile2\",\"_id\":\"1411aX3240ge17520221106010809oh0\",\"routing\":\"ab1daa0979a64f32994a81c0091b1577\"}}\n")
fmt.Println(string(data))
}
-//Requests with and without a partition may have different parsing behavior
+// Requests with and without a partition may have different parsing behavior
func TestBulkReshuffle_MixedRequests(t *testing.T) {
- data:="{\"update\":{\"_index\":\"idx-50\",\"_id\":\"ceq16t3q50k2vhtav6f0\",\"routing\":\"1513594400\",\"retry_on_conflict\":3}}\n{\"doc\":{\"address\":\"\"}}\n{\"delete\":{\"_index\":\"idx-50\",\"_id\":\"ceq16t3q50k2vhtav6g0\",\"routing\":\"1513594401\"}}\n{ \"create\" : { \"_index\" : \"idx-50\", \"_id\" : \"ceq16t3q50k2vhtav6gg\"} }\n{ \"field\" : \"value2\", \"home_location\": \"41.12,-71.34\"}\n"
+ data := "{\"update\":{\"_index\":\"idx-50\",\"_id\":\"ceq16t3q50k2vhtav6f0\",\"routing\":\"1513594400\",\"retry_on_conflict\":3}}\n{\"doc\":{\"address\":\"\"}}\n{\"delete\":{\"_index\":\"idx-50\",\"_id\":\"ceq16t3q50k2vhtav6g0\",\"routing\":\"1513594401\"}}\n{ \"create\" : { \"_index\" : \"idx-50\", \"_id\" : \"ceq16t3q50k2vhtav6gg\"} }\n{ \"field\" : \"value2\", \"home_location\": \"41.12,-71.34\"}\n"
fmt.Println(string(data))
}
@@ -77,16 +77,16 @@ func TestParseActionMeta1(t *testing.T) {
data := []byte("{\"index\":{\"_index\":\"medcl1\",\"_type\":\"_doc\",\"_id\":\"GZq-bnYBC53QmW9Kk2ve\"}}")
action := util.ExtractFieldFromBytes(&data, elastic.ActionStart, elastic.ActionEnd, nil)
fmt.Println(string(action))
- indexb,_,_,_:=jsonparser.Get(data,util.UnsafeBytesToString(action),"_index")
+ indexb, _, _, _ := jsonparser.Get(data, util.UnsafeBytesToString(action), "_index")
fmt.Println(string(indexb))
- assert.Equal(t,string(action),"index")
- assert.Equal(t,string(indexb),"medcl1")
- idb,_,_,_:=jsonparser.Get(data,util.UnsafeBytesToString(action),"_id")
- assert.Equal(t,string(idb),"GZq-bnYBC53QmW9Kk2ve")
+ assert.Equal(t, string(action), "index")
+ assert.Equal(t, string(indexb), "medcl1")
+ idb, _, _, _ := jsonparser.Get(data, util.UnsafeBytesToString(action), "_id")
+ assert.Equal(t, string(idb), "GZq-bnYBC53QmW9Kk2ve")
//update json bytes
- new,_:=jsonparser.Set(data, []byte("medcl2"),"index","_index")
- fmt.Println("new:",string(new))
+ new, _ := jsonparser.Set(data, []byte("medcl2"), "index", "_index")
+ fmt.Println("new:", string(new))
}
@@ -94,166 +94,158 @@ func TestParseActionMeta2(t *testing.T) {
data := []byte("{\"index\":{\"_index\":\"medcl1\",\"_type\":\"_doc\",\"_id\":\"GZq-bnYBC53QmW9Kk2ve\"}}")
- action, indexb, typeb, idb,_ ,_ := elastic.ParseActionMeta(data)
+ action, indexb, typeb, idb, _, _ := elastic.ParseActionMeta(data)
fmt.Println(string(action), string(indexb), string(idb))
- assert.Equal(t,string(action),"index")
- assert.Equal(t,string(indexb),"medcl1")
- assert.Equal(t,string(typeb),"_doc")
- assert.Equal(t,string(idb),"GZq-bnYBC53QmW9Kk2ve")
-
+ assert.Equal(t, string(action), "index")
+ assert.Equal(t, string(indexb), "medcl1")
+ assert.Equal(t, string(typeb), "_doc")
+ assert.Equal(t, string(idb), "GZq-bnYBC53QmW9Kk2ve")
data = []byte("{\"index\":{\"_type\":\"_doc\",\"_id\":\"GZq-bnYBC53QmW9Kk2ve\",\"_index\":\"medcl1\"}}")
- action, indexb, typeb, idb,_,_ = elastic.ParseActionMeta(data)
-
- fmt.Println(string(action), string(indexb), string(idb), )
- assert.Equal(t,string(action),"index")
- assert.Equal(t,string(indexb),"medcl1")
- assert.Equal(t,string(typeb),"_doc")
- assert.Equal(t,string(idb),"GZq-bnYBC53QmW9Kk2ve")
+ action, indexb, typeb, idb, _, _ = elastic.ParseActionMeta(data)
+ fmt.Println(string(action), string(indexb), string(idb))
+ assert.Equal(t, string(action), "index")
+ assert.Equal(t, string(indexb), "medcl1")
+ assert.Equal(t, string(typeb), "_doc")
+ assert.Equal(t, string(idb), "GZq-bnYBC53QmW9Kk2ve")
data = []byte("{\"index\":{\"_id\":\"GZq-bnYBC53QmW9Kk2ve\",\"_type\":\"_doc\",\"_index\":\"medcl1\"}}")
- action, indexb, typeb, idb,_,_ = elastic.ParseActionMeta(data)
+ action, indexb, typeb, idb, _, _ = elastic.ParseActionMeta(data)
- fmt.Println(string(action), string(indexb), string(idb), )
- assert.Equal(t,string(action),"index")
- assert.Equal(t,string(indexb),"medcl1")
- assert.Equal(t,string(typeb),"_doc")
- assert.Equal(t,string(idb),"GZq-bnYBC53QmW9Kk2ve")
+ fmt.Println(string(action), string(indexb), string(idb))
+ assert.Equal(t, string(action), "index")
+ assert.Equal(t, string(indexb), "medcl1")
+ assert.Equal(t, string(typeb), "_doc")
+ assert.Equal(t, string(idb), "GZq-bnYBC53QmW9Kk2ve")
data = []byte("{\"index\":{\"_index\":\"test\",\"_type\":\"doc\"}}")
- action, indexb, typeb, idb,_,_ = elastic.ParseActionMeta(data)
+ action, indexb, typeb, idb, _, _ = elastic.ParseActionMeta(data)
- fmt.Println(string(action), string(indexb), string(idb), )
- assert.Equal(t,string(action),"index")
- assert.Equal(t,string(indexb),"test")
- assert.Equal(t,string(typeb),"doc")
- assert.Equal(t,string(idb),"")
+ fmt.Println(string(action), string(indexb), string(idb))
+ assert.Equal(t, string(action), "index")
+ assert.Equal(t, string(indexb), "test")
+ assert.Equal(t, string(typeb), "doc")
+ assert.Equal(t, string(idb), "")
data = []byte("{\"delete\":{\"_index\":\"test\",\"_type\":\"_doc\"}}")
- action, indexb, typeb, idb,_,_ = elastic.ParseActionMeta(data)
+ action, indexb, typeb, idb, _, _ = elastic.ParseActionMeta(data)
- fmt.Println(string(action), string(indexb), string(idb), )
- assert.Equal(t,string(action),"delete")
- assert.Equal(t,string(indexb),"test")
- assert.Equal(t,string(typeb),"_doc")
- assert.Equal(t,string(idb),"")
+ fmt.Println(string(action), string(indexb), string(idb))
+ assert.Equal(t, string(action), "delete")
+ assert.Equal(t, string(indexb), "test")
+ assert.Equal(t, string(typeb), "_doc")
+ assert.Equal(t, string(idb), "")
data = []byte("{\"create\":{\"_index\":\"test\",\"_type\":\"_doc\"}}")
- action, indexb, typeb, idb,_,_ = elastic.ParseActionMeta(data)
+ action, indexb, typeb, idb, _, _ = elastic.ParseActionMeta(data)
- fmt.Println(string(action), string(indexb), string(idb), )
- assert.Equal(t,string(action),"create")
- assert.Equal(t,string(indexb),"test")
- assert.Equal(t,string(typeb),"_doc")
- assert.Equal(t,string(idb),"")
+ fmt.Println(string(action), string(indexb), string(idb))
+ assert.Equal(t, string(action), "create")
+ assert.Equal(t, string(indexb), "test")
+ assert.Equal(t, string(typeb), "_doc")
+ assert.Equal(t, string(idb), "")
data = []byte("{ \"update\" : {\"_id\" : \"1\", \"_index\" : \"test\"} }")
- action, indexb, typeb, idb,_,_ = elastic.ParseActionMeta(data)
+ action, indexb, typeb, idb, _, _ = elastic.ParseActionMeta(data)
- fmt.Println(string(action), string(indexb), string(idb), )
- assert.Equal(t,string(action),"update")
- assert.Equal(t,string(indexb),"test")
- assert.Equal(t,string(typeb),"")
- assert.Equal(t,string(idb),"1")
+ fmt.Println(string(action), string(indexb), string(idb))
+ assert.Equal(t, string(action), "update")
+ assert.Equal(t, string(indexb), "test")
+ assert.Equal(t, string(typeb), "")
+ assert.Equal(t, string(idb), "1")
data = []byte("{ \"update\" : {\"_index\" : \"test\"} }")
- action, indexb, typeb, idb,_,_ = elastic.ParseActionMeta(data)
-
- fmt.Println(string(action), string(indexb), string(idb), )
- assert.Equal(t,string(action),"update")
- assert.Equal(t,string(indexb),"test")
- assert.Equal(t,string(typeb),"")
- assert.Equal(t,string(idb),"")
+ action, indexb, typeb, idb, _, _ = elastic.ParseActionMeta(data)
+ fmt.Println(string(action), string(indexb), string(idb))
+ assert.Equal(t, string(action), "update")
+ assert.Equal(t, string(indexb), "test")
+ assert.Equal(t, string(typeb), "")
+ assert.Equal(t, string(idb), "")
}
func TestParseActionMeta3(t *testing.T) {
data := []byte("{\"index\":{\"_index\":\"medcl1\",\"_type\":\"_doc\",\"_id\":\"GZq-bnYBC53QmW9Kk2ve\"}}")
- newData,err := updateJsonWithNewIndex("index",data,"newIndex","newType","newId")
- fmt.Println(err,string(newData))
- assert.Equal(t,string(newData),"{\"index\":{\"_index\":\"newIndex\",\"_type\":\"newType\",\"_id\":\"newId\"}}")
-
+ newData, err := updateJsonWithNewIndex("index", data, "newIndex", "newType", "newId")
+ fmt.Println(err, string(newData))
+ assert.Equal(t, string(newData), "{\"index\":{\"_index\":\"newIndex\",\"_type\":\"newType\",\"_id\":\"newId\"}}")
data = []byte("{\"index\":{\"_index\":\"medcl1\",\"_id\":\"GZq-bnYBC53QmW9Kk2ve\"}}")
- newData,err = updateJsonWithNewIndex("index",data,"newIndex","newType","newId")
- fmt.Println(err,string(newData))
- assert.Equal(t,string(newData),"{\"index\":{\"_index\":\"newIndex\",\"_id\":\"newId\",\"_type\":\"newType\"}}")
+ newData, err = updateJsonWithNewIndex("index", data, "newIndex", "newType", "newId")
+ fmt.Println(err, string(newData))
+ assert.Equal(t, string(newData), "{\"index\":{\"_index\":\"newIndex\",\"_id\":\"newId\",\"_type\":\"newType\"}}")
data = []byte("{\"index\":{\"_index\":\"medcl1\",\"_type\":\"doc1\"}}")
- newData,err = updateJsonWithNewIndex("index",data,"newIndex","newType","newId")
- fmt.Println(err,string(newData))
- assert.Equal(t,string(newData),"{\"index\":{\"_index\":\"newIndex\",\"_type\":\"newType\",\"_id\":\"newId\"}}")
-
-
+ newData, err = updateJsonWithNewIndex("index", data, "newIndex", "newType", "newId")
+ fmt.Println(err, string(newData))
+ assert.Equal(t, string(newData), "{\"index\":{\"_index\":\"newIndex\",\"_type\":\"newType\",\"_id\":\"newId\"}}")
data = []byte("{\"index\":{\"_index\":\"medcl1\",\"_type\":\"doc1\"}}")
- newData,err = updateJsonWithNewIndex("index",data,"","","newId")
- fmt.Println(err,string(newData))
- assert.Equal(t,string(newData),"{\"index\":{\"_index\":\"medcl1\",\"_type\":\"doc1\",\"_id\":\"newId\"}}")
+ newData, err = updateJsonWithNewIndex("index", data, "", "", "newId")
+ fmt.Println(err, string(newData))
+ assert.Equal(t, string(newData), "{\"index\":{\"_index\":\"medcl1\",\"_type\":\"doc1\",\"_id\":\"newId\"}}")
}
func TestGetUrlLevelMeta(t *testing.T) {
- pathStr:="/index/_bulk"
+ pathStr := "/index/_bulk"
pathArray := strings.FieldsFunc(pathStr, func(c rune) bool {
- return c=='/'
- } )
- fmt.Println(pathArray,len(pathArray))
+ return c == '/'
+ })
+ fmt.Println(pathArray, len(pathArray))
- pathArray=strings.Split(pathStr,"/")
- fmt.Println(pathArray,len(pathArray))
+ pathArray = strings.Split(pathStr, "/")
+ fmt.Println(pathArray, len(pathArray))
tindex, ttype := elastic.ParseUrlLevelBulkMeta(pathStr)
fmt.Println(tindex, ttype)
- assert.Equal(t,tindex,"index")
- assert.Equal(t,ttype,"")
+ assert.Equal(t, tindex, "index")
+ assert.Equal(t, ttype, "")
pathStr = "/_bulk"
tindex, ttype = elastic.ParseUrlLevelBulkMeta(pathStr)
- fmt.Println(tindex,ttype)
- assert.Equal(t,tindex,"")
- assert.Equal(t,ttype,"")
+ fmt.Println(tindex, ttype)
+ assert.Equal(t, tindex, "")
+ assert.Equal(t, ttype, "")
pathStr = "//_bulk"
tindex, ttype = elastic.ParseUrlLevelBulkMeta(pathStr)
- fmt.Println(tindex,ttype)
- assert.Equal(t,tindex,"")
- assert.Equal(t,ttype,"")
+ fmt.Println(tindex, ttype)
+ assert.Equal(t, tindex, "")
+ assert.Equal(t, ttype, "")
pathStr = "/index/_bulk"
tindex, ttype = elastic.ParseUrlLevelBulkMeta(pathStr)
- fmt.Println(tindex,ttype)
- assert.Equal(t,tindex,"index")
- assert.Equal(t,ttype,"")
+ fmt.Println(tindex, ttype)
+ assert.Equal(t, tindex, "index")
+ assert.Equal(t, ttype, "")
pathStr = "//index/_bulk"
tindex, ttype = elastic.ParseUrlLevelBulkMeta(pathStr)
- fmt.Println(tindex,ttype)
- assert.Equal(t,tindex,"index")
- assert.Equal(t,ttype,"")
+ fmt.Println(tindex, ttype)
+ assert.Equal(t, tindex, "index")
+ assert.Equal(t, ttype, "")
pathStr = "//index//_bulk"
tindex, ttype = elastic.ParseUrlLevelBulkMeta(pathStr)
- fmt.Println(tindex,ttype)
- assert.Equal(t,tindex,"index")
- assert.Equal(t,ttype,"")
+ fmt.Println(tindex, ttype)
+ assert.Equal(t, tindex, "index")
+ assert.Equal(t, ttype, "")
pathStr = "/index/doc/_bulk"
tindex, ttype = elastic.ParseUrlLevelBulkMeta(pathStr)
- fmt.Println(tindex,ttype)
- assert.Equal(t,tindex,"index")
- assert.Equal(t,ttype,"doc")
+ fmt.Println(tindex, ttype)
+ assert.Equal(t, tindex, "index")
+ assert.Equal(t, ttype, "doc")
pathStr = "//index/doc/_bulk"
tindex, ttype = elastic.ParseUrlLevelBulkMeta(pathStr)
- fmt.Println(tindex,ttype)
- assert.Equal(t,tindex,"index")
- assert.Equal(t,ttype,"doc")
+ fmt.Println(tindex, ttype)
+ assert.Equal(t, tindex, "index")
+ assert.Equal(t, ttype, "doc")
}
-
-
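The ParseUrlLevelBulkMeta assertions above fully pin down the path-parsing contract; a hypothetical re-implementation that satisfies exactly those cases (not the library's actual code):

package main

import "strings"

// parseBulkPath extracts index and type from a _bulk URL path, tolerating
// duplicate slashes: "/_bulk" -> ("", ""), "//index//_bulk" -> ("index", ""),
// "/index/doc/_bulk" -> ("index", "doc").
func parseBulkPath(p string) (index, typ string) {
	// FieldsFunc drops empty segments, so "//index//_bulk" and "/index/_bulk"
	// both yield ["index", "_bulk"].
	parts := strings.FieldsFunc(p, func(c rune) bool { return c == '/' })
	switch len(parts) {
	case 2: // [index, _bulk]
		return parts[0], ""
	case 3: // [index, type, _bulk]
		return parts[0], parts[1]
	}
	return "", ""
}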
diff --git a/proxy/filters/elastic/bulk_response_process.go b/proxy/filters/elastic/bulk_response_process.go
index 94769bb0..df57d5d9 100644
--- a/proxy/filters/elastic/bulk_response_process.go
+++ b/proxy/filters/elastic/bulk_response_process.go
@@ -320,7 +320,7 @@ func NewBulkResponseValidate(c *config.Config) (pipeline.Filter, error) {
runner.id = util.GetUUID()
- runner.bulkBufferPool=elastic.NewBulkBufferPool("bulk_response_process",1024*1024*1024,100000)
+ runner.bulkBufferPool = elastic.NewBulkBufferPool("bulk_response_process", 1024*1024*1024, 100000)
if runner.config.RetryFlow != "" && runner.config.PartialFailureRetry {
flow := common.MustGetFlow(runner.config.RetryFlow)
diff --git a/proxy/filters/elastic/date_range_precision_tuning/date_range_precision_tuning.go b/proxy/filters/elastic/date_range_precision_tuning/date_range_precision_tuning.go
index 5177a5f0..be9ad0de 100644
--- a/proxy/filters/elastic/date_range_precision_tuning/date_range_precision_tuning.go
+++ b/proxy/filters/elastic/date_range_precision_tuning/date_range_precision_tuning.go
@@ -48,7 +48,7 @@ var defaultConfig = Config{
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("date_range_precision_tuning", New,&defaultConfig)
+ pipeline.RegisterFilterPluginWithConfigMetadata("date_range_precision_tuning", New, &defaultConfig)
}
func New(c *config.Config) (pipeline.Filter, error) {
@@ -93,21 +93,21 @@ func (this *DatePrecisionTuning) Filter(ctx *fasthttp.RequestCtx) {
startProcess := false
precisionOffset := 0
matchCount := 0
- block:=body[start:end]
- if global.Env().IsDebug{
- log.Debug("body[start:end]: ",string(body[start:end]))
+ block := body[start:end]
+ if global.Env().IsDebug {
+ log.Debug("body[start:end]: ", string(body[start:end]))
}
- len:=len(block)-1
+ len := len(block) - 1
for i, v := range block {
- if i>1 &&i<len{
- left:=block[i-1]
- right:=block[i+1]
+ if i > 1 && i < len {
+ left := block[i-1]
+ right := block[i+1]
if global.Env().IsDebug {
- log.Debug(i,",",string(v),",",block[i-1],",",block[i+1])
+ log.Debug(i, ",", string(v), ",", block[i-1], ",", block[i+1])
}
- if v == 84 &&left > 47 && left < 58 &&right > 47 && right < 58{ //T
+ if v == 84 && left > 47 && left < 58 && right > 47 && right < 58 { //T
startProcess = true
precisionOffset = 0
matchCount++
@@ -115,7 +115,6 @@ func (this *DatePrecisionTuning) Filter(ctx *fasthttp.RequestCtx) {
}
}
-
if startProcess && v > 47 && v < 58 {
precisionOffset++
if precisionOffset <= this.config.TimePrecision {
@@ -153,8 +152,8 @@ func (this *DatePrecisionTuning) Filter(ctx *fasthttp.RequestCtx) {
continue
}
if precisionOffset == 4 {
- if global.Env().IsDebug{
- log.Debug("prev: ",prev,",",prev != 54)
+ if global.Env().IsDebug {
+ log.Debug("prev: ", prev, ",", prev != 54)
}
if prev != 54 { //int:6
body[start+i] = 57
@@ -177,8 +176,8 @@ func (this *DatePrecisionTuning) Filter(ctx *fasthttp.RequestCtx) {
}
})
- if global.Env().IsDebug{
- log.Debug("rewrite success: ",ok,",",string(body),",",this.config.TimePrecision)
+ if global.Env().IsDebug {
+ log.Debug("rewrite success: ", ok, ",", string(body), ",", this.config.TimePrecision)
}
if ok {
diff --git a/proxy/filters/elastic/date_range_precision_tuning/date_range_precision_tuning_test.go b/proxy/filters/elastic/date_range_precision_tuning/date_range_precision_tuning_test.go
index ef646ae1..d0989254 100644
--- a/proxy/filters/elastic/date_range_precision_tuning/date_range_precision_tuning_test.go
+++ b/proxy/filters/elastic/date_range_precision_tuning/date_range_precision_tuning_test.go
@@ -32,132 +32,130 @@ import (
)
func TestDatePrecisionTuning(t *testing.T) {
- filter:= DatePrecisionTuning{config: &defaultConfig}
- ctx:=&fasthttp.RequestCtx{}
- ctx.Request=fasthttp.Request{}
+ filter := DatePrecisionTuning{config: &defaultConfig}
+ ctx := &fasthttp.RequestCtx{}
+ ctx.Request = fasthttp.Request{}
ctx.Request.SetRequestURI("/_search")
ctx.Request.Header.SetMethod(fasthttp.MethodPost)
- data:=[]byte("{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:12.152Z\",\"lte\":\"2020-09-26T08:21:12.152Z\",\"format\":\"strict_date_optional_time\"}")
+ data := []byte("{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:12.152Z\",\"lte\":\"2020-09-26T08:21:12.152Z\",\"format\":\"strict_date_optional_time\"}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=0
+ filter.config.TimePrecision = 0
filter.Filter(ctx)
- rePrecisedBody:=string(ctx.Request.Body())
+ rePrecisedBody := string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T00:00:00.000Z\",\"lte\":\"2020-09-26T23:59:59.999Z\",\"format\":\"strict_date_optional_time\"}")
+ assert.Equal(t, rePrecisedBody, "{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T00:00:00.000Z\",\"lte\":\"2020-09-26T23:59:59.999Z\",\"format\":\"strict_date_optional_time\"}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=1
+ filter.config.TimePrecision = 1
filter.Filter(ctx)
- rePrecisedBody=string(ctx.Request.Body())
+ rePrecisedBody = string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T00:00:00.000Z\",\"lte\":\"2020-09-26T09:59:59.999Z\",\"format\":\"strict_date_optional_time\"}")
+ assert.Equal(t, rePrecisedBody, "{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T00:00:00.000Z\",\"lte\":\"2020-09-26T09:59:59.999Z\",\"format\":\"strict_date_optional_time\"}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=2
+ filter.config.TimePrecision = 2
filter.Filter(ctx)
- rePrecisedBody=string(ctx.Request.Body())
+ rePrecisedBody = string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:00:00.000Z\",\"lte\":\"2020-09-26T08:59:59.999Z\",\"format\":\"strict_date_optional_time\"}")
+ assert.Equal(t, rePrecisedBody, "{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:00:00.000Z\",\"lte\":\"2020-09-26T08:59:59.999Z\",\"format\":\"strict_date_optional_time\"}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=3
+ filter.config.TimePrecision = 3
filter.Filter(ctx)
- rePrecisedBody=string(ctx.Request.Body())
+ rePrecisedBody = string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:20:00.000Z\",\"lte\":\"2020-09-26T08:29:59.999Z\",\"format\":\"strict_date_optional_time\"}")
+ assert.Equal(t, rePrecisedBody, "{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:20:00.000Z\",\"lte\":\"2020-09-26T08:29:59.999Z\",\"format\":\"strict_date_optional_time\"}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=4
+ filter.config.TimePrecision = 4
filter.Filter(ctx)
- rePrecisedBody=string(ctx.Request.Body())
+ rePrecisedBody = string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:00.000Z\",\"lte\":\"2020-09-26T08:21:59.999Z\",\"format\":\"strict_date_optional_time\"}")
-
+ assert.Equal(t, rePrecisedBody, "{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:00.000Z\",\"lte\":\"2020-09-26T08:21:59.999Z\",\"format\":\"strict_date_optional_time\"}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=5
+ filter.config.TimePrecision = 5
filter.Filter(ctx)
- rePrecisedBody=string(ctx.Request.Body())
+ rePrecisedBody = string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:10.000Z\",\"lte\":\"2020-09-26T08:21:19.999Z\",\"format\":\"strict_date_optional_time\"}")
+ assert.Equal(t, rePrecisedBody, "{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:10.000Z\",\"lte\":\"2020-09-26T08:21:19.999Z\",\"format\":\"strict_date_optional_time\"}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=6
+ filter.config.TimePrecision = 6
filter.Filter(ctx)
- rePrecisedBody=string(ctx.Request.Body())
+ rePrecisedBody = string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:12.000Z\",\"lte\":\"2020-09-26T08:21:12.999Z\",\"format\":\"strict_date_optional_time\"}")
+ assert.Equal(t, rePrecisedBody, "{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:12.000Z\",\"lte\":\"2020-09-26T08:21:12.999Z\",\"format\":\"strict_date_optional_time\"}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=7
+ filter.config.TimePrecision = 7
filter.Filter(ctx)
- rePrecisedBody=string(ctx.Request.Body())
+ rePrecisedBody = string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:12.100Z\",\"lte\":\"2020-09-26T08:21:12.199Z\",\"format\":\"strict_date_optional_time\"}")
+ assert.Equal(t, rePrecisedBody, "{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:12.100Z\",\"lte\":\"2020-09-26T08:21:12.199Z\",\"format\":\"strict_date_optional_time\"}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=8
+ filter.config.TimePrecision = 8
filter.Filter(ctx)
- rePrecisedBody=string(ctx.Request.Body())
+ rePrecisedBody = string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:12.150Z\",\"lte\":\"2020-09-26T08:21:12.159Z\",\"format\":\"strict_date_optional_time\"}")
+ assert.Equal(t, rePrecisedBody, "{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:12.150Z\",\"lte\":\"2020-09-26T08:21:12.159Z\",\"format\":\"strict_date_optional_time\"}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=9
+ filter.config.TimePrecision = 9
filter.Filter(ctx)
- rePrecisedBody=string(ctx.Request.Body())
+ rePrecisedBody = string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:12.152Z\",\"lte\":\"2020-09-26T08:21:12.152Z\",\"format\":\"strict_date_optional_time\"}")
+ assert.Equal(t, rePrecisedBody, "{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T08:21:12.152Z\",\"lte\":\"2020-09-26T08:21:12.152Z\",\"format\":\"strict_date_optional_time\"}")
}
func TestDatePrecisionTuning1(t *testing.T) {
- filter:= DatePrecisionTuning{config: &defaultConfig}
+ filter := DatePrecisionTuning{config: &defaultConfig}
- ctx:=&fasthttp.RequestCtx{}
- ctx.Request=fasthttp.Request{}
+ ctx := &fasthttp.RequestCtx{}
+ ctx.Request = fasthttp.Request{}
ctx.Request.SetRequestURI("/_search")
ctx.Request.Header.SetMethod(fasthttp.MethodPost)
- data:=[]byte("{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T22:21:12.152Z\",\"lte\":\"2020-09-26T22:21:12.152Z\",\"format\":\"strict_date_optional_time\"}")
+ data := []byte("{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T22:21:12.152Z\",\"lte\":\"2020-09-26T22:21:12.152Z\",\"format\":\"strict_date_optional_time\"}")
fmt.Println(string(data))
ctx.Request.SetBody(data)
- filter.config.TimePrecision=0
+ filter.config.TimePrecision = 0
filter.Filter(ctx)
- rePrecisedBody:=string(ctx.Request.Body())
+ rePrecisedBody := string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T00:00:00.000Z\",\"lte\":\"2020-09-26T23:59:59.999Z\",\"format\":\"strict_date_optional_time\"}")
-
+ assert.Equal(t, rePrecisedBody, "{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T00:00:00.000Z\",\"lte\":\"2020-09-26T23:59:59.999Z\",\"format\":\"strict_date_optional_time\"}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=9
+ filter.config.TimePrecision = 9
filter.Filter(ctx)
- rePrecisedBody=string(ctx.Request.Body())
+ rePrecisedBody = string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T22:21:12.152Z\",\"lte\":\"2020-09-26T22:21:12.152Z\",\"format\":\"strict_date_optional_time\"}")
+ assert.Equal(t, rePrecisedBody, "{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T22:21:12.152Z\",\"lte\":\"2020-09-26T22:21:12.152Z\",\"format\":\"strict_date_optional_time\"}")
}
func TestDatePrecisionTuning2(t *testing.T) {
- filter:= DatePrecisionTuning{config: &defaultConfig}
- ctx:=&fasthttp.RequestCtx{}
- ctx.Request=fasthttp.Request{}
+ filter := DatePrecisionTuning{config: &defaultConfig}
+ ctx := &fasthttp.RequestCtx{}
+ ctx.Request = fasthttp.Request{}
ctx.Request.SetRequestURI("/_search")
ctx.Request.Header.SetMethod(fasthttp.MethodPost)
- data:=[]byte("{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"type\":\"node_stats\"}},{\"term\":{\"cluster_uuid\":\"OT_m4gvgTvqb-LZjU66NLg\"}},{\"terms\":{\"source_node.uuid\":[\"qIgTsxtuQ8mzAGiBATkqHw\"]}},{\"range\":{\"timestamp\":{\"format\":\"epoch_millis\",\"gte\":1612315985132,\"lte\":1612319585132}}}]}},\"aggs\":{\"nodes\":{\"terms\":{\"field\":\"source_node.uuid\",\"include\":[\"qIgTsxtuQ8mzAGiBATkqHw\"],\"size\":10000},\"aggs\":{\"by_date\":{\"date_histogram\":{\"field\":\"timestamp\",\"min_doc_count\":0,\"fixed_interval\":\"30s\"},\"aggs\":{\"odh_node_cgroup_quota__usage\":{\"max\":{\"field\":\"node_stats.os.cgroup.cpuacct.usage_nanos\"}},\"odh_node_cgroup_quota__periods\":{\"max\":{\"field\":\"node_stats.os.cgroup.cpu.stat.number_of_elapsed_periods\"}},\"odh_node_cgroup_quota__quota\":{\"min\":{\"field\":\"node_stats.os.cgroup.cpu.cfs_quota_micros\"}},\"odh_node_cgroup_quota__usage_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_cgroup_quota__usage\",\"gap_policy\":\"skip\",\"unit\":\"1s\"}},\"odh_node_cgroup_quota__periods_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_cgroup_quota__periods\",\"gap_policy\":\"skip\",\"unit\":\"1s\"}},\"odh_node_cgroup_throttled__metric\":{\"max\":{\"field\":\"node_stats.os.cgroup.cpu.stat.time_throttled_nanos\"}},\"odh_node_cgroup_throttled__metric_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_cgroup_throttled__metric\",\"unit\":\"1s\"}},\"odh_node_cpu_utilization__metric\":{\"max\":{\"field\":\"node_stats.process.cpu.percent\"}},\"odh_node_cpu_utilization__metric_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_cpu_utilization__metric\",\"unit\":\"1s\"}},\"odh_node_load_average__metric\":{\"max\":{\"field\":\"node_stats.os.cpu.load_average.1m\"}},\"odh_node_load_average__metric_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_load_average__metric\",\"unit\":\"1s\"}},\"odh_node_jvm_mem_percent__metric\":{\"max\":{\"field\":\"node_stats.jvm.mem.heap_used_percent\"}},\"odh_node_jvm_mem_percent__metric_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_jvm_mem_percent__metric\",\"unit\":\"1s\"}},\"odh_node_free_space__metric\":{\"max\":{\"field\":\"node_stats.fs.total.available_in_bytes\"}},\"odh_node_free_space__metric_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_free_space__metric\",\"unit\":\"1s\"}}}}}}}}")
+ data := []byte("{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"type\":\"node_stats\"}},{\"term\":{\"cluster_uuid\":\"OT_m4gvgTvqb-LZjU66NLg\"}},{\"terms\":{\"source_node.uuid\":[\"qIgTsxtuQ8mzAGiBATkqHw\"]}},{\"range\":{\"timestamp\":{\"format\":\"epoch_millis\",\"gte\":1612315985132,\"lte\":1612319585132}}}]}},\"aggs\":{\"nodes\":{\"terms\":{\"field\":\"source_node.uuid\",\"include\":[\"qIgTsxtuQ8mzAGiBATkqHw\"],\"size\":10000},\"aggs\":{\"by_date\":{\"date_histogram\":{\"field\":\"timestamp\",\"min_doc_count\":0,\"fixed_interval\":\"30s\"},\"aggs\":{\"odh_node_cgroup_quota__usage\":{\"max\":{\"field\":\"node_stats.os.cgroup.cpuacct.usage_nanos\"}},\"odh_node_cgroup_quota__periods\":{\"max\":{\"field\":\"node_stats.os.cgroup.cpu.stat.number_of_elapsed_periods\"}},\"odh_node_cgroup_quota__quota\":{\"min\":{\"field\":\"node_stats.os.cgroup.cpu.cfs_quota_micros\"}},\"odh_node_cgroup_quota__usage_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_cgroup_quota__usage\",\"gap_policy\":\"skip\",\"unit\":\"1s\"}},\"odh_node_cgroup_quota__periods_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_cgroup_quota__periods\",\"gap_policy\":\"skip\",\"unit\":\"1s\"}},\"odh_node_cgroup_throttled__metric\":{\"max\":{\"field\":\"node_stats.os.cgroup.cpu.stat.time_throttled_nanos\"}},\"odh_node_cgroup_throttled__metric_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_cgroup_throttled__metric\",\"unit\":\"1s\"}},\"odh_node_cpu_utilization__metric\":{\"max\":{\"field\":\"node_stats.process.cpu.percent\"}},\"odh_node_cpu_utilization__metric_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_cpu_utilization__metric\",\"unit\":\"1s\"}},\"odh_node_load_average__metric\":{\"max\":{\"field\":\"node_stats.os.cpu.load_average.1m\"}},\"odh_node_load_average__metric_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_load_average__metric\",\"unit\":\"1s\"}},\"odh_node_jvm_mem_percent__metric\":{\"max\":{\"field\":\"node_stats.jvm.mem.heap_used_percent\"}},\"odh_node_jvm_mem_percent__metric_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_jvm_mem_percent__metric\",\"unit\":\"1s\"}},\"odh_node_free_space__metric\":{\"max\":{\"field\":\"node_stats.fs.total.available_in_bytes\"}},\"odh_node_free_space__metric_deriv\":{\"derivative\":{\"buckets_path\":\"odh_node_free_space__metric\",\"unit\":\"1s\"}}}}}}}}")
fmt.Println(string(data))
- precisionLimit:=4
- ok := util.ProcessJsonData(&data, []byte("range"),150,[][]byte{[]byte("gte"),[]byte("lte")},false, []byte("gte"),[]byte("}"),128, func(data []byte,start, end int) {
+ precisionLimit := 4
+ ok := util.ProcessJsonData(&data, []byte("range"), 150, [][]byte{[]byte("gte"), []byte("lte")}, false, []byte("gte"), []byte("}"), 128, func(data []byte, start, end int) {
fmt.Println(string(data))
startProcess := false
precisionOffset := 0
- matchCount:=0
+ matchCount := 0
for i, v := range data[start:end] {
if v == 84 { //T
startProcess = true
@@ -173,46 +171,46 @@ func TestDatePrecisionTuning2(t *testing.T) {
startProcess = false
continue
}
- if matchCount==1{
+ if matchCount == 1 {
data[start+i] = 48
- }else if matchCount==2{
+ } else if matchCount == 2 {
//prev,_:=strconv.Atoi(string(body[start+i-1]))
- prev:=data[start+i-1]
+ prev := data[start+i-1]
- if precisionOffset==1{
+ if precisionOffset == 1 {
data[start+i] = 50
continue
}
- if precisionOffset==2{
- if prev==48{//int:0
+ if precisionOffset == 2 {
+ if prev == 48 { //int:0
data[start+i] = 57
continue
}
- if prev==49{ //int:1
+ if prev == 49 { //int:1
data[start+i] = 57
continue
}
- if prev==50{ //int:2
+ if prev == 50 { //int:2
data[start+i] = 51
continue
}
}
- if precisionOffset==3{
+ if precisionOffset == 3 {
data[start+i] = 53
continue
}
- if precisionOffset==4{
- if prev!=54{//int:6
+ if precisionOffset == 4 {
+ if prev != 54 { //int:6
data[start+i] = 57
continue
}
}
- if precisionOffset==5{
+ if precisionOffset == 5 {
data[start+i] = 53
continue
}
- if precisionOffset>=6{
+ if precisionOffset >= 6 {
data[start+i] = 57
continue
}
@@ -227,9 +225,9 @@ func TestDatePrecisionTuning2(t *testing.T) {
fmt.Println(ok)
ctx.Request.SetBody(data)
- filter.config.TimePrecision=4
+ filter.config.TimePrecision = 4
filter.Filter(ctx)
- rePrecisedBody:=string(ctx.Request.Body())
+ rePrecisedBody := string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
//assert.Equal(t,rePrecisedBody,"{\"range\":{\"@timestamp\":{\"gte\":\"2019-09-26T00:00:00.000Z\",\"lte\":\"2020-09-26T23:59:59.999Z\",\"format\":\"strict_date_optional_time\"}")
@@ -237,77 +235,75 @@ func TestDatePrecisionTuning2(t *testing.T) {
func TestDatePrecisionTuning4(t *testing.T) {
//data:=[]byte("{\"size\":0,\"query\":{\"bool\":{\"must\":[{\"range\":{\"@timestamp\":{\"gte\":\"2021-12-29T08:50:33.345Z\",\"lte\":\"2022-01-05T08:50:33.345Z\",\"format\":\"strict_date_optional_time\"}}},{\"bool\":{\"must\":[],\"filter\":[{\"match_all\":{}}],\"should\":[],\"must_not\":[]}}],\"filter\":[{\"match_all\":{}}],\"should\":[],\"must_not\":[]}},\"aggs\":{\"timeseries\":{\"date_histogram\":{\"field\":\"@timestamp\",\"min_doc_count\":0,\"time_zone\":\"Asia/Shanghai\",\"extended_bounds\":{\"min\":1640767833345,\"max\":1641372633345},\"calendar_interval\":\"1d\"},\"aggs\":{\"61ca57f2-469d-11e7-af02-69e470af7417\":{\"cardinality\":{\"field\":\"source.ip\"}}},\"meta\":{\"timeField\":\"@timestamp\",\"intervalString\":\"1d\",\"bucketSize\":86400,\"seriesId\":\"61ca57f1-469d-11e7-af02-69e470af7417\"}}},\"timeout\":\"30000ms\"}")
- data:=[]byte("12345121231231231232131312312312{\"range\":{\"@timestamp\":{\"gte\":\"2021-12-29T08:50:33.345Z\",\"lte\":\"2022-01-05T08:50:33.345Z\",\"format\":\"strict_date_optional_time\"}}},{\"bool\":{\"must\":[],\"filter\":[{\"match_all\":{}}],\"should\":[],\"must_not\":[]}}],\"filter\":[{\"match_all\":{}}],\"should\":[],\"must_not\":[]}},\"aggs\":{\"timeseries\":{\"date_histogram\":{\"field\":\"@timestamp\",\"min_doc_count\":0,\"time_zone\":\"Asia/Shanghai\",\"extended_bounds\":{\"min\":1640767833345,\"max\":1641372633345},\"calendar_interval\":\"1d\"},\"aggs\":{\"61ca57f2-469d-11e7-af02-69e470af7417\":{\"cardinality\":{\"field\":\"source.ip\"}}},\"meta\":{\"timeField\":\"@timestamp\",\"intervalString\":\"1d\",\"bucketSize\":86400,\"seriesId\":\"61ca57f1-469d-11e7-af02-69e470af7417\"}}},\"timeout\":\"30000ms\"}")
- filter:= DatePrecisionTuning{config: &defaultConfig}
- ctx:=&fasthttp.RequestCtx{}
- ctx.Request=fasthttp.Request{}
+ data := []byte("12345121231231231232131312312312{\"range\":{\"@timestamp\":{\"gte\":\"2021-12-29T08:50:33.345Z\",\"lte\":\"2022-01-05T08:50:33.345Z\",\"format\":\"strict_date_optional_time\"}}},{\"bool\":{\"must\":[],\"filter\":[{\"match_all\":{}}],\"should\":[],\"must_not\":[]}}],\"filter\":[{\"match_all\":{}}],\"should\":[],\"must_not\":[]}},\"aggs\":{\"timeseries\":{\"date_histogram\":{\"field\":\"@timestamp\",\"min_doc_count\":0,\"time_zone\":\"Asia/Shanghai\",\"extended_bounds\":{\"min\":1640767833345,\"max\":1641372633345},\"calendar_interval\":\"1d\"},\"aggs\":{\"61ca57f2-469d-11e7-af02-69e470af7417\":{\"cardinality\":{\"field\":\"source.ip\"}}},\"meta\":{\"timeField\":\"@timestamp\",\"intervalString\":\"1d\",\"bucketSize\":86400,\"seriesId\":\"61ca57f1-469d-11e7-af02-69e470af7417\"}}},\"timeout\":\"30000ms\"}")
+ filter := DatePrecisionTuning{config: &defaultConfig}
+ ctx := &fasthttp.RequestCtx{}
+ ctx.Request = fasthttp.Request{}
ctx.Request.SetRequestURI("/_search")
ctx.Request.Header.SetMethod(fasthttp.MethodPost)
ctx.Request.SetBody(data)
- filter.config.TimePrecision=2
+ filter.config.TimePrecision = 2
filter.Filter(ctx)
- rePrecisedBody:=string(ctx.Request.Body())
+ rePrecisedBody := string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"12345121231231231232131312312312{\"range\":{\"@timestamp\":{\"gte\":\"2021-12-29T08:00:00.000Z\",\"lte\":\"2022-01-05T08:59:59.999Z\",\"format\":\"strict_date_optional_time\"}}},{\"bool\":{\"must\":[],\"filter\":[{\"match_all\":{}}],\"should\":[],\"must_not\":[]}}],\"filter\":[{\"match_all\":{}}],\"should\":[],\"must_not\":[]}},\"aggs\":{\"timeseries\":{\"date_histogram\":{\"field\":\"@timestamp\",\"min_doc_count\":0,\"time_zone\":\"Asia/Shanghai\",\"extended_bounds\":{\"min\":1640767833345,\"max\":1641372633345},\"calendar_interval\":\"1d\"},\"aggs\":{\"61ca57f2-469d-11e7-af02-69e470af7417\":{\"cardinality\":{\"field\":\"source.ip\"}}},\"meta\":{\"timeField\":\"@timestamp\",\"intervalString\":\"1d\",\"bucketSize\":86400,\"seriesId\":\"61ca57f1-469d-11e7-af02-69e470af7417\"}}},\"timeout\":\"30000ms\"}")
+ assert.Equal(t, rePrecisedBody, "12345121231231231232131312312312{\"range\":{\"@timestamp\":{\"gte\":\"2021-12-29T08:00:00.000Z\",\"lte\":\"2022-01-05T08:59:59.999Z\",\"format\":\"strict_date_optional_time\"}}},{\"bool\":{\"must\":[],\"filter\":[{\"match_all\":{}}],\"should\":[],\"must_not\":[]}}],\"filter\":[{\"match_all\":{}}],\"should\":[],\"must_not\":[]}},\"aggs\":{\"timeseries\":{\"date_histogram\":{\"field\":\"@timestamp\",\"min_doc_count\":0,\"time_zone\":\"Asia/Shanghai\",\"extended_bounds\":{\"min\":1640767833345,\"max\":1641372633345},\"calendar_interval\":\"1d\"},\"aggs\":{\"61ca57f2-469d-11e7-af02-69e470af7417\":{\"cardinality\":{\"field\":\"source.ip\"}}},\"meta\":{\"timeField\":\"@timestamp\",\"intervalString\":\"1d\",\"bucketSize\":86400,\"seriesId\":\"61ca57f1-469d-11e7-af02-69e470af7417\"}}},\"timeout\":\"30000ms\"}")
}
func TestDatePrecisionTuning3(t *testing.T) {
- filter:= DatePrecisionTuning{config: &defaultConfig}
- ctx:=&fasthttp.RequestCtx{}
- ctx.Request=fasthttp.Request{}
+ filter := DatePrecisionTuning{config: &defaultConfig}
+ ctx := &fasthttp.RequestCtx{}
+ ctx.Request = fasthttp.Request{}
ctx.Request.SetRequestURI("/_search")
ctx.Request.Header.SetMethod(fasthttp.MethodPost)
- data:=[]byte("{\n \"query\": {\n \"query_string\": {\n \"default_field\": \"title\",\n \"query\": \"this range AND gte TO goodbye 2019-09-26T00:10:00.000Z thus\"\n }\n }\n}")
+ data := []byte("{\n \"query\": {\n \"query_string\": {\n \"default_field\": \"title\",\n \"query\": \"this range AND gte TO goodbye 2019-09-26T00:10:00.000Z thus\"\n }\n }\n}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=0
+ filter.config.TimePrecision = 0
filter.Filter(ctx)
- rePrecisedBody:=string(ctx.Request.Body())
+ rePrecisedBody := string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\n \"query\": {\n \"query_string\": {\n \"default_field\": \"title\",\n \"query\": \"this range AND gte TO goodbye 2019-09-26T00:10:00.000Z thus\"\n }\n }\n}")
+ assert.Equal(t, rePrecisedBody, "{\n \"query\": {\n \"query_string\": {\n \"default_field\": \"title\",\n \"query\": \"this range AND gte TO goodbye 2019-09-26T00:10:00.000Z thus\"\n }\n }\n}")
}
func TestDatePrecisionTuning5(t *testing.T) {
- filter:= DatePrecisionTuning{config: &defaultConfig}
- ctx:=&fasthttp.RequestCtx{}
- ctx.Request=fasthttp.Request{}
+ filter := DatePrecisionTuning{config: &defaultConfig}
+ ctx := &fasthttp.RequestCtx{}
+ ctx.Request = fasthttp.Request{}
ctx.Request.SetRequestURI("/_search")
ctx.Request.Header.SetMethod(fasthttp.MethodPost)
- data:=[]byte("{\"version\":true,\"size\":500,\"sort\":[{\"createTime\":{\"order\":\"desc\",\"unmapped_type\":\"boolean\"}}],\"aggs\":{\"2\":{\"date_histogram\":{\"field\":\"createTime\",\"fixed_interval\":\"30s\",\"time_zone\":\"Asia/Shanghai\",\"min_doc_count\":1}}},\"stored_fields\":[\"*\"],\"script_fields\":{},\"docvalue_fields\":[{\"field\":\"createTime\",\"format\":\"date_time\"}],\"_source\":{\"excludes\":[]},\"query\":{\"bool\":{\"must\":[],\"filter\":[{\"match_all\":{}},{\"range\":{\"createTime\":{\"gte\":\"2022-08-15T11:26:51.953Z\",\"lte\":\"2022-08-15T11:41:51.953Z\",\"format\":\"strict_date_optional_time\"}}}],\"should\":[],\"must_not\":[]}},\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"fragment_size\":2147483647}}")
+ data := []byte("{\"version\":true,\"size\":500,\"sort\":[{\"createTime\":{\"order\":\"desc\",\"unmapped_type\":\"boolean\"}}],\"aggs\":{\"2\":{\"date_histogram\":{\"field\":\"createTime\",\"fixed_interval\":\"30s\",\"time_zone\":\"Asia/Shanghai\",\"min_doc_count\":1}}},\"stored_fields\":[\"*\"],\"script_fields\":{},\"docvalue_fields\":[{\"field\":\"createTime\",\"format\":\"date_time\"}],\"_source\":{\"excludes\":[]},\"query\":{\"bool\":{\"must\":[],\"filter\":[{\"match_all\":{}},{\"range\":{\"createTime\":{\"gte\":\"2022-08-15T11:26:51.953Z\",\"lte\":\"2022-08-15T11:41:51.953Z\",\"format\":\"strict_date_optional_time\"}}}],\"should\":[],\"must_not\":[]}},\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"fragment_size\":2147483647}}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=6
+ filter.config.TimePrecision = 6
filter.Filter(ctx)
- rePrecisedBody:=string(ctx.Request.Body())
+ rePrecisedBody := string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"version\":true,\"size\":500,\"sort\":[{\"createTime\":{\"order\":\"desc\",\"unmapped_type\":\"boolean\"}}],\"aggs\":{\"2\":{\"date_histogram\":{\"field\":\"createTime\",\"fixed_interval\":\"30s\",\"time_zone\":\"Asia/Shanghai\",\"min_doc_count\":1}}},\"stored_fields\":[\"*\"],\"script_fields\":{},\"docvalue_fields\":[{\"field\":\"createTime\",\"format\":\"date_time\"}],\"_source\":{\"excludes\":[]},\"query\":{\"bool\":{\"must\":[],\"filter\":[{\"match_all\":{}},{\"range\":{\"createTime\":{\"gte\":\"2022-08-15T11:26:51.000Z\",\"lte\":\"2022-08-15T11:41:51.999Z\",\"format\":\"strict_date_optional_time\"}}}],\"should\":[],\"must_not\":[]}},\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"fragment_size\":2147483647}}")
+ assert.Equal(t, rePrecisedBody, "{\"version\":true,\"size\":500,\"sort\":[{\"createTime\":{\"order\":\"desc\",\"unmapped_type\":\"boolean\"}}],\"aggs\":{\"2\":{\"date_histogram\":{\"field\":\"createTime\",\"fixed_interval\":\"30s\",\"time_zone\":\"Asia/Shanghai\",\"min_doc_count\":1}}},\"stored_fields\":[\"*\"],\"script_fields\":{},\"docvalue_fields\":[{\"field\":\"createTime\",\"format\":\"date_time\"}],\"_source\":{\"excludes\":[]},\"query\":{\"bool\":{\"must\":[],\"filter\":[{\"match_all\":{}},{\"range\":{\"createTime\":{\"gte\":\"2022-08-15T11:26:51.000Z\",\"lte\":\"2022-08-15T11:41:51.999Z\",\"format\":\"strict_date_optional_time\"}}}],\"should\":[],\"must_not\":[]}},\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"fragment_size\":2147483647}}")
}
-
func TestDatePrecisionTuning6(t *testing.T) {
- filter:= DatePrecisionTuning{config: &defaultConfig}
- ctx:=&fasthttp.RequestCtx{}
- ctx.Request=fasthttp.Request{}
+ filter := DatePrecisionTuning{config: &defaultConfig}
+ ctx := &fasthttp.RequestCtx{}
+ ctx.Request = fasthttp.Request{}
ctx.Request.SetRequestURI("/_search")
ctx.Request.Header.SetMethod(fasthttp.MethodPost)
- data:=[]byte("{\"size\":0,\"query\":{\"bool\":{\"filter\":[{\"range\":{\"@timestamp\":{\"gte\":\"2024-04-24T08:23:13.301Z\",\"lte\":\"2024-04-24T09:21:12.152Z\",\"format\":\"strict_date_optional_time\"}}},{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}]}},\"aggs\":{\"2\":{\"date_histogram\":{\"interval\":\"200ms\",\"field\":\"@timestamp\",\"min_doc_count\":0,\"format\":\"epoch_millis\"},\"aggs\":{}}}}")
+ data := []byte("{\"size\":0,\"query\":{\"bool\":{\"filter\":[{\"range\":{\"@timestamp\":{\"gte\":\"2024-04-24T08:23:13.301Z\",\"lte\":\"2024-04-24T09:21:12.152Z\",\"format\":\"strict_date_optional_time\"}}},{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}]}},\"aggs\":{\"2\":{\"date_histogram\":{\"interval\":\"200ms\",\"field\":\"@timestamp\",\"min_doc_count\":0,\"format\":\"epoch_millis\"},\"aggs\":{}}}}")
ctx.Request.SetBody(data)
- filter.config.TimePrecision=4
+ filter.config.TimePrecision = 4
filter.Filter(ctx)
- rePrecisedBody:=string(ctx.Request.Body())
+ rePrecisedBody := string(ctx.Request.Body())
fmt.Println(rePrecisedBody)
- assert.Equal(t,rePrecisedBody,"{\"size\":0,\"query\":{\"bool\":{\"filter\":[{\"range\":{\"@timestamp\":{\"gte\":\"2024-04-24T08:23:00.000Z\",\"lte\":\"2024-04-24T09:21:59.999Z\",\"format\":\"strict_date_optional_time\"}}},{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}]}},\"aggs\":{\"2\":{\"date_histogram\":{\"interval\":\"200ms\",\"field\":\"@timestamp\",\"min_doc_count\":0,\"format\":\"epoch_millis\"},\"aggs\":{}}}}")
+ assert.Equal(t, rePrecisedBody, "{\"size\":0,\"query\":{\"bool\":{\"filter\":[{\"range\":{\"@timestamp\":{\"gte\":\"2024-04-24T08:23:00.000Z\",\"lte\":\"2024-04-24T09:21:59.999Z\",\"format\":\"strict_date_optional_time\"}}},{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}]}},\"aggs\":{\"2\":{\"date_histogram\":{\"interval\":\"200ms\",\"field\":\"@timestamp\",\"min_doc_count\":0,\"format\":\"epoch_millis\"},\"aggs\":{}}}}")
}
-
diff --git a/proxy/filters/elastic/rewrite_to_bulk/rewrite_to_bulk.go b/proxy/filters/elastic/rewrite_to_bulk/rewrite_to_bulk.go
index e7ace246..2b5209d1 100644
--- a/proxy/filters/elastic/rewrite_to_bulk/rewrite_to_bulk.go
+++ b/proxy/filters/elastic/rewrite_to_bulk/rewrite_to_bulk.go
@@ -76,21 +76,21 @@ func (filter *RewriteToBulk) Filter(ctx *fasthttp.RequestCtx) {
}
if valid {
- routing:=ctx.PhantomURI().QueryArgs().Peek("routing")
- pipeline:=ctx.PhantomURI().QueryArgs().Peek("pipeline")
- versionType:=ctx.PhantomURI().QueryArgs().Peek("version_type")
- version:=ctx.PhantomURI().QueryArgs().Peek("version")
-
- action:="index"
- if typePath=="_update"{
- action="update"
- typePath=""
- }else if typePath=="_create"{
- action="create"
- typePath=""
- }else if typePath=="_delete"||ctx.IsDelete(){
- action="delete"
- typePath=""
+ routing := ctx.PhantomURI().QueryArgs().Peek("routing")
+ pipeline := ctx.PhantomURI().QueryArgs().Peek("pipeline")
+ versionType := ctx.PhantomURI().QueryArgs().Peek("version_type")
+ version := ctx.PhantomURI().QueryArgs().Peek("version")
+
+ action := "index"
+ if typePath == "_update" {
+ action = "update"
+ typePath = ""
+ } else if typePath == "_create" {
+ action = "create"
+ typePath = ""
+ } else if typePath == "_delete" || ctx.IsDelete() {
+ action = "delete"
+ typePath = ""
}
if idPath == "" && filter.AutoGenerateDocID {
@@ -117,9 +117,9 @@ func (filter *RewriteToBulk) Filter(ctx *fasthttp.RequestCtx) {
panic("index can't be nil")
}
- docBuf.WriteString(fmt.Sprintf("{ \"%v\" : { \"_index\" : \"%s\" ", action,indexPath))
+ docBuf.WriteString(fmt.Sprintf("{ \"%v\" : { \"_index\" : \"%s\" ", action, indexPath))
//write type part
- if typePath != "" &&!filter.RemovedType {
+ if typePath != "" && !filter.RemovedType {
docBuf.WriteString(fmt.Sprintf(", \"_type\" : \"%s\" ", typePath))
}
//write id part
@@ -146,7 +146,7 @@ func (filter *RewriteToBulk) Filter(ctx *fasthttp.RequestCtx) {
//write final part
docBuf.WriteString("} }\n")
- if action!="delete"{
+ if action != "delete" {
body := ctx.Request.Body()
util.WalkBytesAndReplace(body, util.NEWLINE, util.SPACE)
docBuf.Write(bytes.Copy(body))
diff --git a/proxy/filters/elastic/rewrite_to_bulk/rewrite_to_bulk_test.go b/proxy/filters/elastic/rewrite_to_bulk/rewrite_to_bulk_test.go
index eb7015fe..7a93b35b 100644
--- a/proxy/filters/elastic/rewrite_to_bulk/rewrite_to_bulk_test.go
+++ b/proxy/filters/elastic/rewrite_to_bulk/rewrite_to_bulk_test.go
@@ -34,11 +34,11 @@ import (
)
func TestParseURLMeta(t *testing.T) {
- url:="/index/_update/id"
- valid, indexPath, typePath, idPath :=ParseURLMeta(url)
+ url := "/index/_update/id"
+ valid, indexPath, typePath, idPath := ParseURLMeta(url)
fmt.Println(valid, indexPath, typePath, idPath)
assert.Equal(t, valid, true)
assert.Equal(t, indexPath, "index")
assert.Equal(t, typePath, "_update")
- assert.Equal(t,idPath, "id")
-}
\ No newline at end of file
+ assert.Equal(t, idPath, "id")
+}
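For reference, the action line assembled via docBuf in the filter above follows the standard bulk header shape; a compact sketch of the same assembly, simplified and with a hypothetical helper name:

package main

import (
	"fmt"
	"strings"
)

// bulkHeader renders a bulk action metadata line like the filter builds, e.g.
// bulkHeader("update", "index", "", "id") yields
// { "update" : { "_index" : "index", "_id" : "id" } } plus a trailing newline.
func bulkHeader(action, index, typ, id string) string {
	var b strings.Builder
	fmt.Fprintf(&b, "{ \"%s\" : { \"_index\" : \"%s\"", action, index)
	if typ != "" {
		fmt.Fprintf(&b, ", \"_type\" : \"%s\"", typ)
	}
	if id != "" {
		fmt.Fprintf(&b, ", \"_id\" : \"%s\"", id)
	}
	b.WriteString(" } }\n")
	return b.String()
}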
diff --git a/proxy/filters/routing/redirect.go b/proxy/filters/routing/redirect.go
index b8022644..3b360492 100644
--- a/proxy/filters/routing/redirect.go
+++ b/proxy/filters/routing/redirect.go
@@ -31,8 +31,8 @@ import (
)
type RedirectFilter struct {
- Uri string `config:"uri"`
- Code int `config:"code"`
+ Uri string `config:"uri"`
+ Code int `config:"code"`
}
func (filter *RedirectFilter) Name() string {
@@ -40,12 +40,12 @@ func (filter *RedirectFilter) Name() string {
}
func (filter *RedirectFilter) Filter(ctx *fasthttp.RequestCtx) {
- ctx.Redirect(filter.Uri,filter.Code)
+ ctx.Redirect(filter.Uri, filter.Code)
ctx.Finished()
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("redirect",NewRedirectFilter,&RedirectFilter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("redirect", NewRedirectFilter, &RedirectFilter{})
}
func NewRedirectFilter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/script/javascript/event_v0_test.go b/proxy/filters/script/javascript/event_v0_test.go
index 3704360d..2c469e6f 100644
--- a/proxy/filters/script/javascript/event_v0_test.go
+++ b/proxy/filters/script/javascript/event_v0_test.go
@@ -16,6 +16,7 @@
// under the License.
package javascript
+
//
//import (
// "fmt"
diff --git a/proxy/filters/script/javascript/javascript.go b/proxy/filters/script/javascript/javascript.go
index a22cacf8..397a6c2f 100644
--- a/proxy/filters/script/javascript/javascript.go
+++ b/proxy/filters/script/javascript/javascript.go
@@ -19,6 +19,7 @@ package javascript
import (
"bytes"
+ log "github.com/cihub/seelog"
"github.com/dop251/goja"
"github.com/pkg/errors"
"infini.sh/framework/core/config"
@@ -30,14 +31,13 @@ import (
"path"
"path/filepath"
"runtime"
- log "github.com/cihub/seelog"
"strings"
)
var magicChars = `*?[`
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("javascript",NewJavascriptFilter,&jsProcessor{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("javascript", NewJavascriptFilter, &jsProcessor{})
if runtime.GOOS != "windows" {
magicChars = `*?[\`
}
@@ -156,8 +156,8 @@ func loadSources(files ...string) (string, []byte, error) {
}
for _, filePath := range files {
- filePath = path.Join(global.Env().GetDataDir(),"scripts", filePath)
- log.Debugf("loading script: %v",filePath)
+ filePath = path.Join(global.Env().GetDataDir(), "scripts", filePath)
+ log.Debugf("loading script: %v", filePath)
if hasMeta(filePath) {
matches, err := filepath.Glob(filePath)
if err != nil {
@@ -203,10 +203,10 @@ func (p *jsProcessor) Filter(event *fasthttp.RequestCtx) {
var err error
//if p.stats == nil {
- err =s.runProcessFunc(event)
- if err!=nil{
- panic(err)
- }
+ err = s.runProcessFunc(event)
+ if err != nil {
+ panic(err)
+ }
//}
//else {
// rtn, err = p.runWithStats(s, event)
@@ -230,7 +230,6 @@ func (p *jsProcessor) String() string {
return "script=[type=javascript, id=" + p.Tag + ", sources=" + p.sourceFile + "]"
}
-
// hasMeta reports whether path contains any of the magic characters
// recognized by Match/Glob.
func hasMeta(path string) bool {
diff --git a/proxy/filters/script/javascript/module/console/console_test.go b/proxy/filters/script/javascript/module/console/console_test.go
index a7c29739..873ca6ef 100644
--- a/proxy/filters/script/javascript/module/console/console_test.go
+++ b/proxy/filters/script/javascript/module/console/console_test.go
@@ -16,6 +16,7 @@
// under the License.
package console
+
//
//import (
// "testing"
diff --git a/proxy/filters/script/javascript/module/net/net.go b/proxy/filters/script/javascript/module/net/net.go
index 5551bce5..cc4446e5 100644
--- a/proxy/filters/script/javascript/module/net/net.go
+++ b/proxy/filters/script/javascript/module/net/net.go
@@ -27,9 +27,8 @@ import (
// Require registers the net module that provides utilities for working with IP
// addresses. It can be accessed using:
//
-// // javascript
-// var net = require('net');
-//
+// // javascript
+// var net = require('net');
func Require(vm *goja.Runtime, module *goja.Object) {
o := module.Get("exports").(*goja.Object)
o.Set("isIP", isIP)
diff --git a/proxy/filters/script/javascript/module/net/net_test.go b/proxy/filters/script/javascript/module/net/net_test.go
index 1c773c28..711453ba 100644
--- a/proxy/filters/script/javascript/module/net/net_test.go
+++ b/proxy/filters/script/javascript/module/net/net_test.go
@@ -16,6 +16,7 @@
// under the License.
package net_test
+
//
//import (
// "testing"
diff --git a/proxy/filters/script/javascript/module/path/path.go b/proxy/filters/script/javascript/module/path/path.go
index 13937701..3e3fadf3 100644
--- a/proxy/filters/script/javascript/module/path/path.go
+++ b/proxy/filters/script/javascript/module/path/path.go
@@ -29,9 +29,8 @@ import (
// Require registers the path module that provides utilities for working with
// file and directory paths. It can be accessed using:
//
-// // javascript
-// var path = require('path');
-//
+// // javascript
+// var path = require('path');
func Require(vm *goja.Runtime, module *goja.Object) {
setPosix := func(o *goja.Object) *goja.Object {
o.Set("basename", path.Base)
diff --git a/proxy/filters/script/javascript/module/path/path_test.go b/proxy/filters/script/javascript/module/path/path_test.go
index 553bf463..f8267fa4 100644
--- a/proxy/filters/script/javascript/module/path/path_test.go
+++ b/proxy/filters/script/javascript/module/path/path_test.go
@@ -16,6 +16,7 @@
// under the License.
package path_test
+
//
//import (
// "testing"
diff --git a/proxy/filters/script/javascript/module/processor/chain.go b/proxy/filters/script/javascript/module/processor/chain.go
index 66f5f651..7427983e 100644
--- a/proxy/filters/script/javascript/module/processor/chain.go
+++ b/proxy/filters/script/javascript/module/processor/chain.go
@@ -16,6 +16,7 @@
// under the License.
package processor
+
//
//import (
// "github.com/dop251/goja"
diff --git a/proxy/filters/script/javascript/module/processor/processor.go b/proxy/filters/script/javascript/module/processor/processor.go
index 14de3607..effd3de3 100644
--- a/proxy/filters/script/javascript/module/processor/processor.go
+++ b/proxy/filters/script/javascript/module/processor/processor.go
@@ -16,6 +16,7 @@
// under the License.
package processor
+
//
//import (
// "github.com/dop251/goja"
diff --git a/proxy/filters/script/javascript/module/processor/processor_test.go b/proxy/filters/script/javascript/module/processor/processor_test.go
index 00d1c970..cafb3cc2 100644
--- a/proxy/filters/script/javascript/module/processor/processor_test.go
+++ b/proxy/filters/script/javascript/module/processor/processor_test.go
@@ -16,6 +16,7 @@
// under the License.
package processor
+
//
//import (
// "encoding/json"
diff --git a/proxy/filters/script/javascript/module/windows/windows.go b/proxy/filters/script/javascript/module/windows/windows.go
index b7f30671..c598a0ba 100644
--- a/proxy/filters/script/javascript/module/windows/windows.go
+++ b/proxy/filters/script/javascript/module/windows/windows.go
@@ -97,9 +97,8 @@ func commandLineToArgv(cmd string) []string {
// Require registers the windows module that has utilities specific to
// Windows like parsing Windows command lines. It can be accessed using:
//
-// // javascript
-// var windows = require('windows');
-//
+// // javascript
+// var windows = require('windows');
func Require(vm *goja.Runtime, module *goja.Object) {
o := module.Get("exports").(*goja.Object)
diff --git a/proxy/filters/script/javascript/module/windows/windows_test.go b/proxy/filters/script/javascript/module/windows/windows_test.go
index 6d383aff..4a77bf16 100644
--- a/proxy/filters/script/javascript/module/windows/windows_test.go
+++ b/proxy/filters/script/javascript/module/windows/windows_test.go
@@ -16,6 +16,7 @@
// under the License.
package windows
+
//
//import (
// "testing"
diff --git a/proxy/filters/script/javascript/session.go b/proxy/filters/script/javascript/session.go
index cf6bd0b5..041ae2ea 100644
--- a/proxy/filters/script/javascript/session.go
+++ b/proxy/filters/script/javascript/session.go
@@ -194,10 +194,10 @@ func (s *session) setEvent(b *fasthttp.RequestCtx) error {
}
// runProcessFunc executes process() from the JS script.
-func (s *session) runProcessFunc(b *fasthttp.RequestCtx)error {
+func (s *session) runProcessFunc(b *fasthttp.RequestCtx) error {
var err error
defer func() {
- if !global.Env().IsDebug{
+ if !global.Env().IsDebug {
if r := recover(); r != nil {
//log.Error("The javascript processor caused an unexpected panic "+
// "while processing an event. Recovering, but please report this.",
@@ -242,7 +242,7 @@ func (s *session) runProcessFunc(b *fasthttp.RequestCtx)error {
}
if s.evt.IsCancelled() {
- return nil
+ return nil
}
return nil
}
diff --git a/proxy/filters/script/javascript/session_test.go b/proxy/filters/script/javascript/session_test.go
index fa0c589a..cb48c0f0 100644
--- a/proxy/filters/script/javascript/session_test.go
+++ b/proxy/filters/script/javascript/session_test.go
@@ -16,6 +16,7 @@
// under the License.
package javascript
+
//
//import (
// "context"
diff --git a/proxy/filters/security/auth/basic_auth.go b/proxy/filters/security/auth/basic_auth.go
index 40f807df..0a3e3077 100644
--- a/proxy/filters/security/auth/basic_auth.go
+++ b/proxy/filters/security/auth/basic_auth.go
@@ -92,7 +92,7 @@ func (filter *BasicAuth) Name() string {
func (filter *BasicAuth) Filter(ctx *fasthttp.RequestCtx) {
exists, user, pass := ctx.Request.ParseBasicAuth()
- if exists&& len(filter.ValidUsers) > 0 {
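+ // only validate credentials when the request carries a Basic Auth header and valid users are configured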
+ if exists && len(filter.ValidUsers) > 0 {
p, ok := filter.ValidUsers[util.UnsafeBytesToString(user)]
if ok {
if util.UnsafeBytesToString(pass) == p {
@@ -109,7 +109,7 @@ func (filter *BasicAuth) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("basic_auth",NewBasicAuthFilter,&BasicAuth{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("basic_auth", NewBasicAuthFilter, &BasicAuth{})
}
func NewBasicAuthFilter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/security/auth/set_basic_auth.go b/proxy/filters/security/auth/set_basic_auth.go
index 05e7a951..1f3042c3 100644
--- a/proxy/filters/security/auth/set_basic_auth.go
+++ b/proxy/filters/security/auth/set_basic_auth.go
@@ -52,7 +52,7 @@ func (filter *SetBasicAuth) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("set_basic_auth",NewSetBasicAuth,&SetBasicAuth{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("set_basic_auth", NewSetBasicAuth, &SetBasicAuth{})
}
func NewSetBasicAuth(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/security/ldap/ldap.go b/proxy/filters/security/ldap/ldap.go
index 5b826aaf..a3da7841 100644
--- a/proxy/filters/security/ldap/ldap.go
+++ b/proxy/filters/security/ldap/ldap.go
@@ -57,8 +57,8 @@ type LDAPFilter struct {
GroupAttribute string `config:"group_attribute"`
Attributes []string `config:"attributes"`
RequireGroup bool `config:"require_group"`
- MaxCacheItems int `config:"max_cache_items"`
- CacheTTL string `config:"cache_ttl"`
+ MaxCacheItems int `config:"max_cache_items"`
+ CacheTTL string `config:"cache_ttl"`
BypassAPIKey bool `config:"bypass_api_key"`
ldapQuery auth.Strategy
@@ -113,7 +113,7 @@ func (filter *LDAPFilter) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("ldap_auth",pipeline.FilterConfigChecked(NewLDAPFilter, pipeline.RequireFields("host","bind_dn","base_dn")),&LDAPFilter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("ldap_auth", pipeline.FilterConfigChecked(NewLDAPFilter, pipeline.RequireFields("host", "bind_dn", "base_dn")), &LDAPFilter{})
}
func NewLDAPFilter(c *config.Config) (pipeline.Filter, error) {
@@ -122,7 +122,7 @@ func NewLDAPFilter(c *config.Config) (pipeline.Filter, error) {
Tls: false,
RequireGroup: true,
Port: 389,
- CacheTTL: "300s",
+ CacheTTL: "300s",
UserFilter: "(uid=%s)",
GroupFilter: "(memberUid=%s)",
UidAttribute: "uid",
@@ -153,14 +153,14 @@ func NewLDAPFilter(c *config.Config) (pipeline.Filter, error) {
}
cacheObj := libcache.LRU.New(runner.MaxCacheItems)
- if runner.CacheTTL!=""{
- cacheObj.SetTTL(util.GetDurationOrDefault(runner.CacheTTL,time.Minute*5))
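+ // cache LDAP auth results; entries expire after cache_ttl (default 5 minutes)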
+ if runner.CacheTTL != "" {
+ cacheObj.SetTTL(util.GetDurationOrDefault(runner.CacheTTL, time.Minute*5))
}
cacheObj.RegisterOnExpired(func(key, _ interface{}) {
cacheObj.Peek(key)
})
- runner.ldapQuery = ldap.NewCached(&cfg,cacheObj)
+ runner.ldapQuery = ldap.NewCached(&cfg, cacheObj)
return &runner, nil
}
diff --git a/proxy/filters/security/ldap/ldap_test.go b/proxy/filters/security/ldap/ldap_test.go
index 3fa878e7..7d6b3931 100644
--- a/proxy/filters/security/ldap/ldap_test.go
+++ b/proxy/filters/security/ldap/ldap_test.go
@@ -45,6 +45,7 @@ func TestLDAPFunctions(t *testing.T) {
UserFilter: "(uid=%s)",
}
+
r := &fasthttp.Request{}
r.SetBasicAuth("galieleo", "password")
diff --git a/proxy/filters/throttle/bulk_request_throttle.go b/proxy/filters/throttle/bulk_request_throttle.go
index 49318869..bd7722ec 100644
--- a/proxy/filters/throttle/bulk_request_throttle.go
+++ b/proxy/filters/throttle/bulk_request_throttle.go
@@ -97,7 +97,7 @@ func (this *ElasticsearchBulkRequestThrottle) Filter(ctx *fasthttp.RequestCtx) {
} else {
indexPayloadStats[index] = v + len(payloadBytes)
}
- },nil)
+ }, nil)
if global.Env().IsDebug {
log.Debug(indexOpStats)
diff --git a/proxy/filters/throttle/context_limiter.go b/proxy/filters/throttle/context_limiter.go
index ebb3006c..9a1ccf7e 100644
--- a/proxy/filters/throttle/context_limiter.go
+++ b/proxy/filters/throttle/context_limiter.go
@@ -39,7 +39,7 @@ type ContextLimitFilter struct {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("context_limiter",NewContextLimitFilter,&ContextLimitFilter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("context_limiter", NewContextLimitFilter, &ContextLimitFilter{})
}
func NewContextLimitFilter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/throttle/drop_filter.go b/proxy/filters/throttle/drop_filter.go
index cb607d89..71c97a3a 100644
--- a/proxy/filters/throttle/drop_filter.go
+++ b/proxy/filters/throttle/drop_filter.go
@@ -43,7 +43,7 @@ func (filter *DropFilter) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("drop", NewDropFilter,&DropFilter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("drop", NewDropFilter, &DropFilter{})
}
func NewDropFilter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/throttle/health_check.go b/proxy/filters/throttle/health_check.go
index 9a722d13..4f312407 100644
--- a/proxy/filters/throttle/health_check.go
+++ b/proxy/filters/throttle/health_check.go
@@ -60,7 +60,7 @@ func (filter *ElasticsearchHealthCheckFilter) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("elasticsearch_health_check", NewHealthCheckFilter,&ElasticsearchHealthCheckFilter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("elasticsearch_health_check", NewHealthCheckFilter, &ElasticsearchHealthCheckFilter{})
}
func NewHealthCheckFilter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/throttle/request_api_key_limiter.go b/proxy/filters/throttle/request_api_key_limiter.go
index 8cf28a30..3f161cc3 100644
--- a/proxy/filters/throttle/request_api_key_limiter.go
+++ b/proxy/filters/throttle/request_api_key_limiter.go
@@ -38,7 +38,7 @@ type RequestAPIKeyLimitFilter struct {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("request_api_key_limiter",NewRequestAPIKeyLimitFilter,&RequestAPIKeyLimitFilter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("request_api_key_limiter", NewRequestAPIKeyLimitFilter, &RequestAPIKeyLimitFilter{})
}
func NewRequestAPIKeyLimitFilter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/throttle/request_client_ip_limiter.go b/proxy/filters/throttle/request_client_ip_limiter.go
index d350547b..7e2b39ac 100644
--- a/proxy/filters/throttle/request_client_ip_limiter.go
+++ b/proxy/filters/throttle/request_client_ip_limiter.go
@@ -38,7 +38,7 @@ type RequestClientIPLimitFilter struct {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("request_client_ip_limiter",NewRequestClientIPLimitFilter,&RequestClientIPLimitFilter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("request_client_ip_limiter", NewRequestClientIPLimitFilter, &RequestClientIPLimitFilter{})
}
func NewRequestClientIPLimitFilter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/throttle/request_host_limiter.go b/proxy/filters/throttle/request_host_limiter.go
index df6d6970..d2f3992c 100644
--- a/proxy/filters/throttle/request_host_limiter.go
+++ b/proxy/filters/throttle/request_host_limiter.go
@@ -38,7 +38,7 @@ type RequestHostLimitFilter struct {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("request_host_limiter",NewRequestHostLimitFilter,&RequestHostLimitFilter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("request_host_limiter", NewRequestHostLimitFilter, &RequestHostLimitFilter{})
}
func NewRequestHostLimitFilter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/throttle/request_limiter_base.go b/proxy/filters/throttle/request_limiter_base.go
index fc6ec11b..b4cb9979 100644
--- a/proxy/filters/throttle/request_limiter_base.go
+++ b/proxy/filters/throttle/request_limiter_base.go
@@ -48,7 +48,7 @@ type GenericLimiter struct {
RetryDelayInMs int `config:"retry_delay_in_ms"`
Status int `config:"status"`
Message string `config:"message"`
- WarnMessage bool `config:"log_warn_message"`
+ WarnMessage bool `config:"log_warn_message"`
RetriedMessage string `config:"failed_retry_message"`
interval time.Duration
@@ -76,49 +76,49 @@ func (filter *GenericLimiter) init() {
}
func (filter *GenericLimiter) internalProcess(tokenType, token string, ctx *fasthttp.RequestCtx) {
- filter.internalProcessWithValues(tokenType,token,ctx,1,ctx.Request.GetRequestLength())
+ filter.internalProcessWithValues(tokenType, token, ctx, 1, ctx.Request.GetRequestLength())
}
-func (filter *GenericLimiter) internalProcessWithValues(tokenType, token string, ctx *fasthttp.RequestCtx,hits, bytes int) {
+func (filter *GenericLimiter) internalProcessWithValues(tokenType, token string, ctx *fasthttp.RequestCtx, hits, bytes int) {
if global.Env().IsDebug {
- log.Tracef("limit config: %v, type:%v, token:%v", filter,tokenType,token)
+ log.Tracef("limit config: %v, type:%v, token:%v", filter, tokenType, token)
}
if filter.MaxRequests > 0 || filter.MaxBytes > 0 {
retryTimes := 0
RetryRateLimit:
- hitLimit:=false
+ hitLimit := false
var limitType string
- if (filter.MaxRequests > 0 && !rate.GetRateLimiter(filter.uuid+"_limit_requests", token, int(filter.MaxRequests), int(filter.BurstRequests), filter.interval).AllowN(time.Now(),hits)){
- limitType=fmt.Sprintf(">requests: %v/%v",filter.MaxRequests,filter.interval.String())
- hitLimit=true
- }else {
- if (filter.MaxBytes > 0 && !rate.GetRateLimiter(filter.uuid+"_limit_bytes", token, int(filter.MaxBytes), int(filter.BurstBytes), filter.interval).AllowN(time.Now(), bytes)){
- limitType=fmt.Sprintf(">bytes: %v/%v",filter.MaxBytes,filter.interval.String())
- hitLimit=true
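+ // try the request-count bucket first, then the byte-count bucket, and record which limit tripped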
+ if filter.MaxRequests > 0 && !rate.GetRateLimiter(filter.uuid+"_limit_requests", token, int(filter.MaxRequests), int(filter.BurstRequests), filter.interval).AllowN(time.Now(), hits) {
+ limitType = fmt.Sprintf(">requests: %v/%v", filter.MaxRequests, filter.interval.String())
+ hitLimit = true
+ } else {
+ if filter.MaxBytes > 0 && !rate.GetRateLimiter(filter.uuid+"_limit_bytes", token, int(filter.MaxBytes), int(filter.BurstBytes), filter.interval).AllowN(time.Now(), bytes) {
+ limitType = fmt.Sprintf(">bytes: %v/%v", filter.MaxBytes, filter.interval.String())
+ hitLimit = true
}
}
- if hitLimit{
+ if hitLimit {
if global.Env().IsDebug {
- log.Warn(tokenType, " ", token, " reached limit, type:",limitType,", message:",filter.Message)
+ log.Warn(tokenType, " ", token, " reached limit, type:", limitType, ", message:", filter.Message)
}
- if filter.MaxRequests > 0 &&filter.MaxRequests< hits{
- log.Warn(tokenType, " ", token, " reached limit: ",filter.MaxRequests," by:",hits,", seems the limit is too small, message:",filter.Message)
+ if filter.MaxRequests > 0 && filter.MaxRequests < hits {
+ log.Warn(tokenType, " ", token, " reached limit: ", filter.MaxRequests, " by:", hits, ", seems the limit is too small, message:", filter.Message)
}
- if filter.MaxBytes > 0 &&filter.MaxBytes< bytes{
- log.Warn(tokenType, " ", token, " reached limit: ",filter.MaxBytes," by:",bytes,", seems the limit is too small, message:",filter.Message)
+ if filter.MaxBytes > 0 && filter.MaxBytes < bytes {
+ log.Warn(tokenType, " ", token, " reached limit: ", filter.MaxBytes, " by:", bytes, ", seems the limit is too small, message:", filter.Message)
}
if filter.Action == "drop" {
ctx.SetStatusCode(filter.Status)
ctx.WriteString(filter.Message)
- if filter.WarnMessage{
- log.Warnf("request throttled: %v, %v %v, type: %v, message: %v",tokenType,token,string(ctx.Path()),limitType,filter.Message)
+ if filter.WarnMessage {
+ log.Warnf("request throttled: %v, %v %v, type: %v, message: %v", tokenType, token, string(ctx.Path()), limitType, filter.Message)
}
ctx.Finished()
@@ -128,8 +128,8 @@ func (filter *GenericLimiter) internalProcessWithValues(tokenType, token string,
ctx.SetStatusCode(filter.Status)
ctx.WriteString(filter.RetriedMessage)
- if filter.WarnMessage{
- log.Warnf("request throttled: %v %v %v, type: %v, message: %v",tokenType,token,string(ctx.Path()),limitType,filter.Message)
+ if filter.WarnMessage {
+ log.Warnf("request throttled: %v %v %v, type: %v, message: %v", tokenType, token, string(ctx.Path()), limitType, filter.Message)
}
ctx.Finished()
diff --git a/proxy/filters/throttle/request_path_limiter.go b/proxy/filters/throttle/request_path_limiter.go
index 3700c9e6..00e8f845 100644
--- a/proxy/filters/throttle/request_path_limiter.go
+++ b/proxy/filters/throttle/request_path_limiter.go
@@ -36,13 +36,13 @@ import (
)
type RequestPathLimitFilter struct {
- WarnMessage bool `config:"log_warn_message"`
- Message string `config:"message"`
- Rules []*MatchRules `config:"rules"`
+ WarnMessage bool `config:"log_warn_message"`
+ Message string `config:"message"`
+ Rules []*MatchRules `config:"rules"`
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("request_path_limiter",NewRequestPathLimitFilter,&RequestPathLimitFilter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("request_path_limiter", NewRequestPathLimitFilter, &RequestPathLimitFilter{})
}
func NewRequestPathLimitFilter(c *config.Config) (pipeline.Filter, error) {
@@ -131,8 +131,8 @@ func (filter *RequestPathLimitFilter) Filter(ctx *fasthttp.RequestCtx) {
log.Debug(key, " reach limited ", v.Pattern, ",extract:", item)
}
- if filter.WarnMessage{
- log.Warnf("request throttled: %v",string(ctx.Path()))
+ if filter.WarnMessage {
+ log.Warnf("request throttled: %v", string(ctx.Path()))
}
ctx.SetStatusCode(429)
diff --git a/proxy/filters/throttle/request_user_limiter.go b/proxy/filters/throttle/request_user_limiter.go
index 4f0e9b66..79a73954 100644
--- a/proxy/filters/throttle/request_user_limiter.go
+++ b/proxy/filters/throttle/request_user_limiter.go
@@ -38,7 +38,7 @@ type RequestUserLimitFilter struct {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("request_user_limiter",NewRequestUserLimitFilter,&RequestUserLimitFilter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("request_user_limiter", NewRequestUserLimitFilter, &RequestUserLimitFilter{})
}
func NewRequestUserLimitFilter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/throttle/retry_limiter.go b/proxy/filters/throttle/retry_limiter.go
index 717339bd..9e2fdc5a 100644
--- a/proxy/filters/throttle/retry_limiter.go
+++ b/proxy/filters/throttle/retry_limiter.go
@@ -58,18 +58,18 @@ func (filter *RetryLimiter) Filter(ctx *fasthttp.RequestCtx) {
times = t
}
}
- if global.Env().IsDebug{
- log.Debugf("retry times: %v > %v",times,filter.MaxRetryTimes)
+ if global.Env().IsDebug {
+ log.Debugf("retry times: %v > %v", times, filter.MaxRetryTimes)
}
if times > filter.MaxRetryTimes {
- log.Debugf("hit max retry times: %v > %v",times,filter.MaxRetryTimes)
+ log.Debugf("hit max retry times: %v > %v", times, filter.MaxRetryTimes)
ctx.Finished()
ctx.Request.Header.Del(RetryKey)
queue.Push(queue.GetOrInitConfig(filter.Queue), ctx.Request.Encode())
time.Sleep(time.Duration(filter.SleepInterval) * time.Millisecond)
- if len(filter.TagsOnSuccess)>0{
- ctx.UpdateTags(filter.TagsOnSuccess,nil)
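+ // apply the configured TagsOnSuccess to the context once the request has been requeued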
+ if len(filter.TagsOnSuccess) > 0 {
+ ctx.UpdateTags(filter.TagsOnSuccess, nil)
}
return
}
@@ -79,7 +79,7 @@ func (filter *RetryLimiter) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("retry_limiter",pipeline.FilterConfigChecked(NewRetryLimiter, pipeline.RequireFields("queue_name")),&RetryLimiter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("retry_limiter", pipeline.FilterConfigChecked(NewRetryLimiter, pipeline.RequireFields("queue_name")), &RetryLimiter{})
}
func NewRetryLimiter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/throttle/sleep.go b/proxy/filters/throttle/sleep.go
index 8d37b92a..6fab582f 100644
--- a/proxy/filters/throttle/sleep.go
+++ b/proxy/filters/throttle/sleep.go
@@ -47,7 +47,7 @@ func (filter *SleepFilter) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("sleep",NewSleepFilter,&SleepFilter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("sleep", NewSleepFilter, &SleepFilter{})
}
func NewSleepFilter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/transform/context_regex_replace.go b/proxy/filters/transform/context_regex_replace.go
index 4ae4456c..7f61516a 100644
--- a/proxy/filters/transform/context_regex_replace.go
+++ b/proxy/filters/transform/context_regex_replace.go
@@ -60,7 +60,7 @@ func (filter *ContextRegexReplace) Filter(ctx *fasthttp.RequestCtx) {
valueStr := util.ToString(value)
if len(valueStr) > 0 {
newBody := filter.p.ReplaceAll([]byte(valueStr), util.UnsafeStringToBytes(filter.To))
- _,err := ctx.PutValue(filter.Context, string(newBody))
+ _, err := ctx.PutValue(filter.Context, string(newBody))
if err != nil {
log.Error(err)
return
@@ -70,7 +70,7 @@ func (filter *ContextRegexReplace) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("context_regex_replace",NewContextRegexReplace,&ContextRegexReplace{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("context_regex_replace", NewContextRegexReplace, &ContextRegexReplace{})
}
func NewContextRegexReplace(c *config.Config) (filter pipeline.Filter, err error) {
diff --git a/proxy/filters/transform/request_body_json_del.go b/proxy/filters/transform/request_body_json_del.go
index 465fc8c7..5fe871ea 100644
--- a/proxy/filters/transform/request_body_json_del.go
+++ b/proxy/filters/transform/request_body_json_del.go
@@ -72,7 +72,7 @@ func (filter *RequestBodyJsonDel) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("request_body_json_del",NewRequestBodyJsonDel,&RequestBodyJsonDel{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("request_body_json_del", NewRequestBodyJsonDel, &RequestBodyJsonDel{})
}
func NewRequestBodyJsonDel(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/transform/request_body_json_set.go b/proxy/filters/transform/request_body_json_set.go
index ddf75a9a..6b3a404a 100644
--- a/proxy/filters/transform/request_body_json_set.go
+++ b/proxy/filters/transform/request_body_json_set.go
@@ -77,7 +77,7 @@ func (filter *RequestBodyJsonSet) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("request_body_json_set",NewRequestBodyJsonSet,&RequestBodyJsonSet{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("request_body_json_set", NewRequestBodyJsonSet, &RequestBodyJsonSet{})
}
func NewRequestBodyJsonSet(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/filters/transform/request_body_regex_replace.go b/proxy/filters/transform/request_body_regex_replace.go
index 492cc2cf..a0ca9ff9 100644
--- a/proxy/filters/transform/request_body_regex_replace.go
+++ b/proxy/filters/transform/request_body_regex_replace.go
@@ -58,7 +58,7 @@ func (filter *RequestBodyRegexReplace) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("request_body_regex_replace",NewRequestBodyRegexReplace,&RequestBodyRegexReplace{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("request_body_regex_replace", NewRequestBodyRegexReplace, &RequestBodyRegexReplace{})
}
func NewRequestBodyRegexReplace(c *config.Config) (filter pipeline.Filter, err error) {
diff --git a/proxy/filters/transform/set_context.go b/proxy/filters/transform/set_context.go
index 2f2a9f3a..10ae8ebb 100644
--- a/proxy/filters/transform/set_context.go
+++ b/proxy/filters/transform/set_context.go
@@ -65,7 +65,7 @@ func (filter *SetContext) Filter(ctx *fasthttp.RequestCtx) {
if t != nil {
str = t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) {
variable, err := ctx.GetValue(tag)
- if err==nil{
+ if err == nil {
return w.Write([]byte(util.ToString(variable)))
}
return -1, err
diff --git a/proxy/filters/transform/set_request_cookie.go b/proxy/filters/transform/set_request_cookie.go
index c741113b..2496624e 100644
--- a/proxy/filters/transform/set_request_cookie.go
+++ b/proxy/filters/transform/set_request_cookie.go
@@ -35,8 +35,8 @@ import (
)
type Cookie struct {
- Reset bool `config:"reset"`//reset request cookies
- Cookies map[string]string `config:"cookies"`//request cookies
+ Reset bool `config:"reset"` //reset request cookies
+ Cookies map[string]string `config:"cookies"` //request cookies
}
func (filter *Cookie) Name() string {
@@ -44,17 +44,17 @@ func (filter *Cookie) Name() string {
}
func (filter *Cookie) Filter(ctx *fasthttp.RequestCtx) {
- if filter.Reset{
+ if filter.Reset {
ctx.Request.Header.DelAllCookies()
}
- for k,v:=range filter.Cookies{
- ctx.Request.Header.SetCookie(k,v)
+ for k, v := range filter.Cookies {
+ ctx.Request.Header.SetCookie(k, v)
}
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("set_request_cookie", NewCookieFilter,&Cookie{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("set_request_cookie", NewCookieFilter, &Cookie{})
}
func NewCookieFilter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/gateway.go b/proxy/gateway.go
index 90c285eb..d80c799f 100755
--- a/proxy/gateway.go
+++ b/proxy/gateway.go
@@ -262,19 +262,19 @@ func (module *GatewayModule) loadEntryPoints() map[string]*entry.Entrypoint {
entryConfigs := []common.EntryConfig{}
ok, err := env.ParseConfig("gateway", &module)
- if ok && err != nil &&global.Env().SystemConfig.Configs.PanicOnConfigError{
+ if ok && err != nil && global.Env().SystemConfig.Configs.PanicOnConfigError {
panic(err)
}
ok, err = env.ParseConfig("entry", &entryConfigs)
- if ok && err != nil &&global.Env().SystemConfig.Configs.PanicOnConfigError{
+ if ok && err != nil && global.Env().SystemConfig.Configs.PanicOnConfigError {
panic(err)
}
log.Trace(util.ToJson(entryConfigs, true))
ok, err = env.ParseConfig("flow", &flowConfigs)
- if ok && err != nil &&global.Env().SystemConfig.Configs.PanicOnConfigError{
+ if ok && err != nil && global.Env().SystemConfig.Configs.PanicOnConfigError {
panic(err)
}
diff --git a/proxy/output/elastic/elasticsearch.go b/proxy/output/elastic/elasticsearch.go
index d2d10524..5de23619 100644
--- a/proxy/output/elastic/elasticsearch.go
+++ b/proxy/output/elastic/elasticsearch.go
@@ -91,14 +91,14 @@ func init() {
func New(c *config.Config) (pipeline.Filter, error) {
cfg := ProxyConfig{
- Balancer: "weight",
- MaxResponseBodySize: 100 * 1024 * 1024,
- MaxConnection: 5000,
- MaxRetryTimes: 0,
- RetryDelayInMs: 1000,
- TLSInsecureSkipVerify: true,
- ReadBufferSize: 4096 * 4,
- WriteBufferSize: 4096 * 4,
+ Balancer: "weight",
+ MaxResponseBodySize: 100 * 1024 * 1024,
+ MaxConnection: 5000,
+ MaxRetryTimes: 0,
+ RetryDelayInMs: 1000,
+ TLSInsecureSkipVerify: true,
+ ReadBufferSize: 4096 * 4,
+ WriteBufferSize: 4096 * 4,
CheckClusterHealthWhenNotAvailable: true,
//max wait timeout for a free connection
MaxConnWaitTimeout: util.GetDurationOrDefault("30s", 30*time.Second),
diff --git a/proxy/output/elastic/reverseproxy.go b/proxy/output/elastic/reverseproxy.go
index 79880c3f..51c5a4e9 100644
--- a/proxy/output/elastic/reverseproxy.go
+++ b/proxy/output/elastic/reverseproxy.go
@@ -334,7 +334,7 @@ func NewReverseProxy(cfg *ProxyConfig) *ReverseProxy {
}
}
- p.HTTPPool=fasthttp.NewRequestResponsePool("es_proxy_"+cfg.Elasticsearch)
+ p.HTTPPool = fasthttp.NewRequestResponsePool("es_proxy_" + cfg.Elasticsearch)
return &p
}
@@ -547,7 +547,7 @@ START:
if err != nil {
- retryAble:=false
+ retryAble := false
if util.ContainsAnyInArray(err.Error(), failureMessage) {
stats.Increment("reverse_proxy", "backend_failure")
@@ -563,12 +563,12 @@ START:
}
//server failure flow
} else if res.StatusCode() == 429 {
- if p.proxyConfig.RetryOnBackendBusy{
- retryAble=true
+ if p.proxyConfig.RetryOnBackendBusy {
+ retryAble = true
}
}
- if retryAble{
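+ // transient failures are retried up to MaxRetryTimes, waiting RetryDelayInMs between attempts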
+ if retryAble {
retry++
if p.proxyConfig.MaxRetryTimes > 0 && retry < p.proxyConfig.MaxRetryTimes {
if p.proxyConfig.RetryDelayInMs > 0 {
@@ -579,7 +579,7 @@ START:
} else {
log.Debugf("reached max retries, failed to proxy request: %v, %v", err, string(myctx.Request.Header.RequestURI()))
}
- }else {
+ } else {
if rate.GetRateLimiterPerSecond(metadata.Config.ID, host+"backend_failure_on_error", 1).Allow() {
log.Warnf("failed to proxy request: %v to host %v, %v, retried: #%v, error:%v", string(myctx.Request.Header.RequestURI()), host, retry, retry, err)
}
diff --git a/proxy/output/kafka/example/main.go b/proxy/output/kafka/example/main.go
index ab425ee4..1caa1993 100644
--- a/proxy/output/kafka/example/main.go
+++ b/proxy/output/kafka/example/main.go
@@ -21,15 +21,15 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
-//https://www.sohamkamani.com/golang/working-with-kafka/
+// https://www.sohamkamani.com/golang/working-with-kafka/
package main
import (
"context"
"fmt"
+ "github.com/segmentio/kafka-go"
"log"
"os"
- "github.com/segmentio/kafka-go"
"strconv"
"time"
)
@@ -46,13 +46,12 @@ func main() {
// both the produce and consume functions are
// blocking
- for i:=0;i<10;i++{
+ for i := 0; i < 10; i++ {
go produce(ctx)
}
//consume(ctx)
-
- time.Sleep(1*time.Hour)
+ time.Sleep(1 * time.Hour)
}
func produce(ctx context.Context) {
@@ -61,26 +60,26 @@ func produce(ctx context.Context) {
// initialize the writer with the broker addresses, and the topic
w := kafka.NewWriter(kafka.WriterConfig{
- Brokers: []string{brokerAddress},
- Topic: topic,
- BatchSize: 1000,
+ Brokers: []string{brokerAddress},
+ Topic: topic,
+ BatchSize: 1000,
BatchTimeout: 10 * time.Millisecond,
RequiredAcks: 0,
// assign the logger to the writer
})
- w.AllowAutoTopicCreation=true
+ w.AllowAutoTopicCreation = true
- messages:=[]kafka.Message{}
- j:=0
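+ // build batches of 1,000 messages and send each batch with a single WriteMessages call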
+ messages := []kafka.Message{}
+ j := 0
for {
- for j=0;j<1000;j++{
- msg:=kafka.Message{
- Key: []byte(strconv.Itoa(i)),
+ for j = 0; j < 1000; j++ {
+ msg := kafka.Message{
+ Key: []byte(strconv.Itoa(i)),
Value: []byte("this is message" + strconv.Itoa(i)),
}
- messages=append(messages,msg)
+ messages = append(messages, msg)
}
err := w.WriteMessages(ctx, messages...)
@@ -88,7 +87,7 @@ func produce(ctx context.Context) {
panic("could not write message " + err.Error())
}
//fmt.Print(".")
- messages=[]kafka.Message{}
+ messages = []kafka.Message{}
//fmt.Println("writes:", i)
i++
}
diff --git a/proxy/output/kafka/kafka.go b/proxy/output/kafka/kafka.go
index 7d86bea6..a0273e31 100644
--- a/proxy/output/kafka/kafka.go
+++ b/proxy/output/kafka/kafka.go
@@ -78,7 +78,7 @@ func (filter *Kafka) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("kafka",NewKafkaFilter,&Kafka{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("kafka", NewKafkaFilter, &Kafka{})
}
func NewKafkaFilter(c *config.Config) (pipeline.Filter, error) {
@@ -109,8 +109,7 @@ func NewKafkaFilter(c *config.Config) (pipeline.Filter, error) {
runner.taskContext = context.Background()
runner.messages = []kafka.Message{}
- runner.lock=sync.Mutex{}
-
+ runner.lock = sync.Mutex{}
return &runner, nil
}
diff --git a/proxy/output/queue/queue.go b/proxy/output/queue/queue.go
index 0bbb0454..afb26d30 100644
--- a/proxy/output/queue/queue.go
+++ b/proxy/output/queue/queue.go
@@ -41,16 +41,16 @@ import (
)
type EnqueueFilter struct {
- DepthThreshold int64 `config:"depth_threshold"`
- Message string `config:"message"` //override the message in the request
+ DepthThreshold int64 `config:"depth_threshold"`
+ Message string `config:"message"` //override the message in the request
- Type string `config:"type"`
- QueueName string `config:"queue_name"`
- Labels map[string]interface{} `config:"labels,omitempty"`
+ Type string `config:"type"`
+ QueueName string `config:"queue_name"`
+ Labels map[string]interface{} `config:"labels,omitempty"`
- SaveMessageOffset bool `config:"save_last_produced_message_offset,omitempty"`
- IncludeResponse bool `config:"include_response,omitempty"`
- LastProducedMessageOffsetKey string `config:"last_produced_message_offset_key,omitempty"`
+ SaveMessageOffset bool `config:"save_last_produced_message_offset,omitempty"`
+ IncludeResponse bool `config:"include_response,omitempty"`
+ LastProducedMessageOffsetKey string `config:"last_produced_message_offset_key,omitempty"`
messageBytes []byte
queueNameTemplate *fasttemplate.Template
messageTemplate *fasttemplate.Template
@@ -110,14 +110,14 @@ func (filter *EnqueueFilter) Filter(ctx *fasthttp.RequestCtx) {
data = filter.messageBytes
}
} else {
- if filter.IncludeResponse{
- buffer:=bytes.Buffer{}
- err:= ctx.Encode(&buffer)
- if err!=nil{
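+ // IncludeResponse serializes the full request/response exchange; otherwise only the request is queued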
+ if filter.IncludeResponse {
+ buffer := bytes.Buffer{}
+ err := ctx.Encode(&buffer)
+ if err != nil {
panic(err)
}
- data=buffer.Bytes()
- }else{
+ data = buffer.Bytes()
+ } else {
data = ctx.Request.Encode()
}
}
diff --git a/proxy/output/stats/stats.go b/proxy/output/stats/stats.go
index 97610e60..f9da72f5 100644
--- a/proxy/output/stats/stats.go
+++ b/proxy/output/stats/stats.go
@@ -52,7 +52,7 @@ func (filter StatsFilter) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("stats",NewStatsFilter,&StatsFilter{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("stats", NewStatsFilter, &StatsFilter{})
}
func NewStatsFilter(c *config.Config) (pipeline.Filter, error) {
diff --git a/proxy/output/translog/translog.go b/proxy/output/translog/translog.go
index e9480017..b2dfb16c 100644
--- a/proxy/output/translog/translog.go
+++ b/proxy/output/translog/translog.go
@@ -72,7 +72,7 @@ func (filter *TranslogOutput) Filter(ctx *fasthttp.RequestCtx) {
}
func init() {
- pipeline.RegisterFilterPluginWithConfigMetadata("translog",NewTranslogOutput,&TranslogOutput{})
+ pipeline.RegisterFilterPluginWithConfigMetadata("translog", NewTranslogOutput, &TranslogOutput{})
}
func NewTranslogOutput(c *config.Config) (pipeline.Filter, error) {
diff --git a/service/floating_ip/broadcast.go b/service/floating_ip/broadcast.go
index 5fa56dc5..98a944e2 100644
--- a/service/floating_ip/broadcast.go
+++ b/service/floating_ip/broadcast.go
@@ -51,7 +51,7 @@ const (
)
type Request struct {
- IsActive bool `json:"active"`
+ IsActive bool `json:"active"`
FloatingIP string `json:"floating_ip"`
FixedIP string `json:"fixed_ip"`
EchoPort int `json:"echo_port"`
@@ -59,9 +59,10 @@ type Request struct {
}
var lastBroadcast time.Time
-//send a Broadcast message to network to discovery the cluster
+
+// Broadcast sends a discovery message to the network so cluster peers can find each other.
func Broadcast(config *FloatingIPConfig, req *Request) {
- if config==nil{
+ if config == nil {
panic("invalid config")
}
@@ -82,17 +83,17 @@ func Broadcast(config *FloatingIPConfig, req *Request) {
payload := util.MustToJSONBytes(req)
- _,err=c.Write(payload)
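+ // publish this node's state for cluster discovery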
+ _, err = c.Write(payload)
if err != nil {
log.Error(err)
return
}
- lastBroadcast=time.Now()
+ lastBroadcast = time.Now()
}
func ServeMulticastDiscovery(config *FloatingIPConfig, h func(*net.UDPAddr, int, []byte)) {
- if config==nil{
+ if config == nil {
panic("invalid config")
}
@@ -108,7 +109,7 @@ func ServeMulticastDiscovery(config *FloatingIPConfig, h func(*net.UDPAddr, int,
return
}
- err=l.SetReadBuffer(maxDataSize)
+ err = l.SetReadBuffer(maxDataSize)
if err != nil {
log.Error(err)
return
@@ -124,4 +125,3 @@ func ServeMulticastDiscovery(config *FloatingIPConfig, h func(*net.UDPAddr, int,
}
}
-
diff --git a/service/floating_ip/floating_ip.go b/service/floating_ip/floating_ip.go
index d0200be3..440f3e41 100644
--- a/service/floating_ip/floating_ip.go
+++ b/service/floating_ip/floating_ip.go
@@ -43,9 +43,9 @@ import (
"infini.sh/framework/core/env"
"infini.sh/framework/core/errors"
"infini.sh/framework/core/global"
- "infini.sh/framework/core/wrapper/net"
"infini.sh/framework/core/task"
"infini.sh/framework/core/util"
+ "infini.sh/framework/core/wrapper/net"
"infini.sh/gateway/service/heartbeat"
)
@@ -98,7 +98,7 @@ var (
func (module FloatingIPPlugin) Setup() {
ok, err := env.ParseConfig("floating_ip", &floatingIPConfig)
- if ok && err != nil &&global.Env().SystemConfig.Configs.PanicOnConfigError{
+ if ok && err != nil && global.Env().SystemConfig.Configs.PanicOnConfigError {
panic(err)
}
@@ -297,7 +297,7 @@ func (module FloatingIPPlugin) SwitchToStandbyMode(latency time.Duration) {
task.RunWithinGroup("standby", func(ctx context.Context) error {
aliveChan := make(chan bool)
- client:=heartbeat.New()
+ client := heartbeat.New()
go func() {
defer func() {
if !global.Env().IsDebug {
@@ -478,7 +478,7 @@ func (module FloatingIPPlugin) StateMachine() {
}
}()
- client:=heartbeat.New()
+ client := heartbeat.New()
aliveChan := make(chan bool)
go func() {
defer func() {
diff --git a/service/floating_ip/floating_ip_test.go b/service/floating_ip/floating_ip_test.go
index ca4796e0..adb42c79 100644
--- a/service/floating_ip/floating_ip_test.go
+++ b/service/floating_ip/floating_ip_test.go
@@ -29,6 +29,6 @@ import (
)
func TestPingActiveNode(t *testing.T) {
- ok:=pingActiveNode("192.168.3.98")
+ ok := pingActiveNode("192.168.3.98")
fmt.Println(ok)
-}
\ No newline at end of file
+}
diff --git a/service/forcemerge/merge.go b/service/forcemerge/merge.go
index 398c53a4..a7b06125 100644
--- a/service/forcemerge/merge.go
+++ b/service/forcemerge/merge.go
@@ -60,25 +60,24 @@ func (this ForceMergeModule) Name() string {
}
type Discovery struct {
- Enabled bool `config:"enabled"`
- MinIdleTime string `config:"min_idle_time"`
- Interval string `config:"interval"`
- Rules []DiscoveryRule `config:"rules"`
+ Enabled bool `config:"enabled"`
+ MinIdleTime string `config:"min_idle_time"`
+ Interval string `config:"interval"`
+ Rules []DiscoveryRule `config:"rules"`
}
type DiscoveryRule struct {
- IndexPattern string `config:"index_pattern"`
+ IndexPattern string `config:"index_pattern"`
TimeFields []string `config:"timestamp_fields"`
}
type MergeConfig struct {
- Enabled bool `config:"enabled"`
- Elasticsearch string `config:"elasticsearch"`
- Indices []string `config:"indices"`
- MinSegmentCount int `config:"min_num_segments"`
- MaxSegmentCount int `config:"max_num_segments"`
- Discovery Discovery `config:"discovery"`
-
+ Enabled bool `config:"enabled"`
+ Elasticsearch string `config:"elasticsearch"`
+ Indices []string `config:"indices"`
+ MinSegmentCount int `config:"min_num_segments"`
+ MaxSegmentCount int `config:"max_num_segments"`
+ Discovery Discovery `config:"discovery"`
}
var mergeConfig = MergeConfig{}
@@ -86,7 +85,7 @@ var mergeConfig = MergeConfig{}
func (module ForceMergeModule) Setup() {
ok, err := env.ParseConfig("force_merge", &mergeConfig)
- if ok && err != nil &&global.Env().SystemConfig.Configs.PanicOnConfigError{
+ if ok && err != nil && global.Env().SystemConfig.Configs.PanicOnConfigError {
panic(err)
}
@@ -264,44 +263,44 @@ func (module ForceMergeModule) Start() error {
client := elastic.GetClient(mergeConfig.Elasticsearch)
for i, v := range mergeConfig.Indices {
log.Infof("#%v - start forcemerging index [%v]", i, v)
- forceMerge(client,v)
+ forceMerge(client, v)
}
for {
- bytes,err:=queue.Pop(queue.GetOrInitConfig(taskQueue))
- if err!=nil{
+ bytes, err := queue.Pop(queue.GetOrInitConfig(taskQueue))
+ if err != nil {
panic(err)
}
- taskItem:=ForceMergeTaskItem{}
- util.FromJSONBytes(bytes,&taskItem)
+ taskItem := ForceMergeTaskItem{}
+ util.FromJSONBytes(bytes, &taskItem)
client := elastic.GetClient(mergeConfig.Elasticsearch)
- forceMerge(client,taskItem.Index)
+ forceMerge(client, taskItem.Index)
}
}()
- if mergeConfig.Discovery.Enabled{
+ if mergeConfig.Discovery.Enabled {
task1 := task.ScheduleTask{
Description: "discovery indices for force_merge",
Type: "interval",
Interval: "60m",
Task: func(ctx context.Context) {
client := elastic.GetClient(mergeConfig.Elasticsearch)
- for _,v:=range mergeConfig.Discovery.Rules{
- log.Trace("processing index_pattern: ",v.IndexPattern)
- indices,err:=client.GetIndices(v.IndexPattern)
- if err!=nil{
+ for _, v := range mergeConfig.Discovery.Rules {
+ log.Trace("processing index_pattern: ", v.IndexPattern)
+ indices, err := client.GetIndices(v.IndexPattern)
+ if err != nil {
panic(err)
}
- if indices!=nil{
- for _,v:=range (*indices){
- if v.SegmentsCount> int64(mergeConfig.MinSegmentCount){
- task:=ForceMergeTaskItem{Elasticsearch: mergeConfig.Elasticsearch,Index: v.Index}
- log.Trace("add force_merge task to queue,",task)
- err:=queue.Push(queue.GetOrInitConfig(taskQueue),util.MustToJSONBytes(task))
- if err!=nil{
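+ // enqueue a force_merge task for each index whose segment count exceeds min_num_segments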
+ if indices != nil {
+ for _, v := range *indices {
+ if v.SegmentsCount > int64(mergeConfig.MinSegmentCount) {
+ task := ForceMergeTaskItem{Elasticsearch: mergeConfig.Elasticsearch, Index: v.Index}
+ log.Trace("add force_merge task to queue,", task)
+ err := queue.Push(queue.GetOrInitConfig(taskQueue), util.MustToJSONBytes(task))
+ if err != nil {
panic(err)
}
}
@@ -316,7 +315,7 @@ func (module ForceMergeModule) Start() error {
return nil
}
-const taskQueue ="force_merge_tasks"
+const taskQueue = "force_merge_tasks"
func (module ForceMergeModule) Stop() error {
diff --git a/service/heartbeat/server.go b/service/heartbeat/server.go
index 02809234..6151e367 100644
--- a/service/heartbeat/server.go
+++ b/service/heartbeat/server.go
@@ -35,14 +35,14 @@ import (
)
var (
-//Req_REGISTER byte = 1 // 1 --- c register cid
-//Res_REGISTER byte = 2 // 2 --- s response
+// Req_REGISTER byte = 1 // 1 --- c register cid
+// Res_REGISTER byte = 2 // 2 --- s response
//
-//Req_HEARTBEAT byte = 3 // 3 --- s send heartbeat req
-//Res_HEARTBEAT byte = 4 // 4 --- c send heartbeat res
+// Req_HEARTBEAT byte = 3 // 3 --- s send heartbeat req
+// Res_HEARTBEAT byte = 4 // 4 --- c send heartbeat res
//
-//Req byte = 5 // 5 --- cs send data
-//Res byte = 6 // 6 --- cs send ack
+// Req byte = 5 // 5 --- cs send data
+// Res byte = 6 // 6 --- cs send ack
)
type CS struct {