+
+## Features
+- fast and precise MIME type and file extension detection
+- long list of [supported MIME types](supported_mimes.md)
+- possibility to [extend](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#example-package-Extend) with other file formats
+- common file formats are prioritized
+- [text vs. binary files differentiation](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#example-package-TextVsBinary)
+- safe for concurrent usage
+
+## Install
+```bash
+go get github.com/gabriel-vasile/mimetype
+```
+
+## Usage
+```go
+mtype := mimetype.Detect([]byte)
+// OR
+mtype, err := mimetype.DetectReader(io.Reader)
+// OR
+mtype, err := mimetype.DetectFile("/path/to/file")
+fmt.Println(mtype.String(), mtype.Extension())
+```
+See the [runnable Go Playground examples](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#pkg-overview).
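+
+For instance, a minimal, self-contained program (the file path is a placeholder)
+could look like this sketch:
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/gabriel-vasile/mimetype"
+)
+
+func main() {
+	// DetectFile reads only the file header, never the whole file.
+	mtype, err := mimetype.DetectFile("/path/to/file")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(mtype.String(), mtype.Extension())
+
+	// Prefer Is over comparing strings; it ignores optional MIME
+	// parameters and known aliases.
+	if mtype.Is("application/zip") {
+		fmt.Println("looks like a zip archive")
+	}
+}
+```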
+
+## When to use
+Only use libraries like **mimetype** as a last resort. Content type detection
+using magic numbers is slow, inaccurate, and non-standard. Most of the time,
+protocols have methods for specifying such metadata; e.g., the `Content-Type`
+header in HTTP and SMTP.
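+
+As an illustration of that advice, the sketch below (the helper is ours, not
+part of the library) trusts the declared `Content-Type` first and resorts to
+detection only when the header is absent:
+```go
+// contentType returns the declared Content-Type of resp, falling back to
+// content-based detection when the header is missing. Note DetectReader
+// consumes the header bytes of resp.Body; buffer them if the body is
+// needed again later.
+func contentType(resp *http.Response) (string, error) {
+	if ct := resp.Header.Get("Content-Type"); ct != "" {
+		return ct, nil
+	}
+	mtype, err := mimetype.DetectReader(resp.Body)
+	if err != nil {
+		return "", err
+	}
+	return mtype.String(), nil
+}
+```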
+
+## FAQ
+Q: My file is in the list of [supported MIME types](supported_mimes.md) but
+it is not correctly detected. What should I do?
+
+A: Some file formats (often Microsoft Office documents) keep their signatures
+towards the end of the file. Try increasing the number of bytes used for detection
+with:
+```go
+mimetype.SetLimit(1024*1024) // Set limit to 1MB.
+// or
+mimetype.SetLimit(0) // No limit, whole file content used.
+mimetype.DetectFile("file.doc")
+```
+If increasing the limit does not help, please
+[open an issue](https://github.com/gabriel-vasile/mimetype/issues/new?assignees=&labels=&template=mismatched-mime-type-detected.md&title=).
+
+## Structure
+**mimetype** organizes its MIME type detection logic in a hierarchical structure.
+This reduces the number of checks needed to detect a file type. The reason
+behind this choice is that there are file formats used as containers for other
+file formats. For example, Microsoft Office files are just zip archives,
+containing specific metadata files. Once a file has been identified as a
+zip, there is no need to check if it is a text file, but it is worth checking if
+it is a Microsoft Office file.
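+
+The same hierarchy is reachable through the public API. As a sketch, a
+hypothetical zip-based format (the marker file, MIME type, and extension below
+are made up) can be registered as a child of `application/zip`, so it is only
+checked after the input is already known to be a zip:
+```go
+zip := mimetype.Lookup("application/zip")
+zip.Extend(func(raw []byte, limit uint32) bool {
+	// Cheap, illustrative check for a marker file name in the archive header.
+	return bytes.Contains(raw, []byte("myformat-manifest.xml"))
+}, "application/x-myformat", ".myf")
+```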
+
+To prevent loading entire files into memory, when detecting from a
+[reader](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#DetectReader)
+or from a [file](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#DetectFile),
+**mimetype** limits itself to reading only the header of the input.
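+
+For example, detecting from an opened file consumes only the header, so a
+seekable reader can be rewound and reused afterwards (the file name is a
+placeholder; 3072 bytes is the default limit at the time of writing):
+```go
+f, err := os.Open("large-video.mp4")
+if err != nil {
+	log.Fatal(err)
+}
+defer f.Close()
+
+// Reads at most the configured limit from f, not the whole file.
+mtype, err := mimetype.DetectReader(f)
+if err != nil {
+	log.Fatal(err)
+}
+
+// Rewind before handing f to whatever processes the full content.
+if _, err := f.Seek(0, io.SeekStart); err != nil {
+	log.Fatal(err)
+}
+fmt.Println(mtype.String())
+```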
+
+## Performance
+Thanks to the hierarchical structure, searching for common formats first,
+and limiting itself to file headers, **mimetype** matches the performance of the
+stdlib `http.DetectContentType` while outperforming the alternative `filetype` package.
+
+```bash
+ mimetype http.DetectContentType filetype
+BenchmarkMatchTar-24 250 ns/op 400 ns/op 3778 ns/op
+BenchmarkMatchZip-24 524 ns/op 351 ns/op 4884 ns/op
+BenchmarkMatchJpeg-24 103 ns/op 228 ns/op 839 ns/op
+BenchmarkMatchGif-24 139 ns/op 202 ns/op 751 ns/op
+BenchmarkMatchPng-24 165 ns/op 221 ns/op 1176 ns/op
+```
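+
+Comparable numbers can be reproduced with a standard Go benchmark along the
+lines of this sketch (the test data path is a placeholder):
+```go
+func BenchmarkDetectPng(b *testing.B) {
+	data, err := os.ReadFile("testdata/sample.png")
+	if err != nil {
+		b.Fatal(err)
+	}
+	b.Run("mimetype", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			mimetype.Detect(data)
+		}
+	})
+	b.Run("http.DetectContentType", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			http.DetectContentType(data)
+		}
+	})
+}
+```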
+
+## Contributing
+See [CONTRIBUTING.md](CONTRIBUTING.md).
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go b/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go
new file mode 100644
index 0000000..0647f73
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go
@@ -0,0 +1,309 @@
+package charset
+
+import (
+ "bytes"
+ "encoding/xml"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/net/html"
+)
+
+const (
+ F = 0 /* character never appears in text */
+ T = 1 /* character appears in plain ASCII text */
+ I = 2 /* character appears in ISO-8859 text */
+ X = 3 /* character appears in non-ISO extended ASCII (Mac, IBM PC) */
+)
+
+var (
+ boms = []struct {
+ bom []byte
+ enc string
+ }{
+ {[]byte{0xEF, 0xBB, 0xBF}, "utf-8"},
+ {[]byte{0x00, 0x00, 0xFE, 0xFF}, "utf-32be"},
+ {[]byte{0xFF, 0xFE, 0x00, 0x00}, "utf-32le"},
+ {[]byte{0xFE, 0xFF}, "utf-16be"},
+ {[]byte{0xFF, 0xFE}, "utf-16le"},
+ }
+
+ // https://github.com/file/file/blob/fa93fb9f7d21935f1c7644c47d2975d31f12b812/src/encoding.c#L241
+ textChars = [256]byte{
+ /* BEL BS HT LF VT FF CR */
+ F, F, F, F, F, F, F, T, T, T, T, T, T, T, F, F, /* 0x0X */
+ /* ESC */
+ F, F, F, F, F, F, F, F, F, F, F, T, F, F, F, F, /* 0x1X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x2X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x3X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x4X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x5X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x6X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, F, /* 0x7X */
+ /* NEL */
+ X, X, X, X, X, T, X, X, X, X, X, X, X, X, X, X, /* 0x8X */
+ X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, /* 0x9X */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xaX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xbX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xcX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xdX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xeX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xfX */
+ }
+)
+
+// FromBOM returns the charset declared in the BOM of content.
+func FromBOM(content []byte) string {
+ for _, b := range boms {
+ if bytes.HasPrefix(content, b.bom) {
+ return b.enc
+ }
+ }
+ return ""
+}
+
+// FromPlain returns the charset of plain text. It relies on BOM presence
+// and falls back on checking each byte in content.
+func FromPlain(content []byte) string {
+ if len(content) == 0 {
+ return ""
+ }
+ if cset := FromBOM(content); cset != "" {
+ return cset
+ }
+ origContent := content
+ // Try to detect UTF-8.
+ // First eliminate any partial rune at the end.
+ for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
+ b := content[i]
+ if b < 0x80 {
+ break
+ }
+ if utf8.RuneStart(b) {
+ content = content[:i]
+ break
+ }
+ }
+ hasHighBit := false
+ for _, c := range content {
+ if c >= 0x80 {
+ hasHighBit = true
+ break
+ }
+ }
+ if hasHighBit && utf8.Valid(content) {
+ return "utf-8"
+ }
+
+ // ASCII is a subset of UTF8. Follow W3C recommendation and replace with UTF8.
+ if ascii(origContent) {
+ return "utf-8"
+ }
+
+ return latin(origContent)
+}
+
+func latin(content []byte) string {
+ hasControlBytes := false
+ for _, b := range content {
+ t := textChars[b]
+ if t != T && t != I {
+ return ""
+ }
+ if b >= 0x80 && b <= 0x9F {
+ hasControlBytes = true
+ }
+ }
+ // Code range 0x80 to 0x9F is reserved for control characters in ISO-8859-1
+ // (so-called C1 Controls). Windows 1252, however, has printable punctuation
+ // characters in this range.
+ if hasControlBytes {
+ return "windows-1252"
+ }
+ return "iso-8859-1"
+}
+
+func ascii(content []byte) bool {
+ for _, b := range content {
+ if textChars[b] != T {
+ return false
+ }
+ }
+ return true
+}
+
+// FromXML returns the charset of an XML document. It relies on the XML
+// header and falls back on the plain text content.
+func FromXML(content []byte) string {
+ if cset := fromXML(content); cset != "" {
+ return cset
+ }
+ return FromPlain(content)
+}
+func fromXML(content []byte) string {
+ content = trimLWS(content)
+ dec := xml.NewDecoder(bytes.NewReader(content))
+ rawT, err := dec.RawToken()
+ if err != nil {
+ return ""
+ }
+
+ t, ok := rawT.(xml.ProcInst)
+ if !ok {
+ return ""
+ }
+
+ return strings.ToLower(xmlEncoding(string(t.Inst)))
+}
+
+// FromHTML returns the charset of an HTML document. It first looks if a BOM is
+// present and if so uses it to determine the charset. If no BOM is present,
+// it relies on the meta tag and falls back on the plain text content.
+func FromHTML(content []byte) string {
+ if cset := FromBOM(content); cset != "" {
+ return cset
+ }
+ if cset := fromHTML(content); cset != "" {
+ return cset
+ }
+ return FromPlain(content)
+}
+
+func fromHTML(content []byte) string {
+ z := html.NewTokenizer(bytes.NewReader(content))
+ for {
+ switch z.Next() {
+ case html.ErrorToken:
+ return ""
+
+ case html.StartTagToken, html.SelfClosingTagToken:
+ tagName, hasAttr := z.TagName()
+ if !bytes.Equal(tagName, []byte("meta")) {
+ continue
+ }
+ attrList := make(map[string]bool)
+ gotPragma := false
+
+ const (
+ dontKnow = iota
+ doNeedPragma
+ doNotNeedPragma
+ )
+ needPragma := dontKnow
+
+ name := ""
+ for hasAttr {
+ var key, val []byte
+ key, val, hasAttr = z.TagAttr()
+ ks := string(key)
+ if attrList[ks] {
+ continue
+ }
+ attrList[ks] = true
+ for i, c := range val {
+ if 'A' <= c && c <= 'Z' {
+ val[i] = c + 0x20
+ }
+ }
+
+ switch ks {
+ case "http-equiv":
+ if bytes.Equal(val, []byte("content-type")) {
+ gotPragma = true
+ }
+
+ case "content":
+ name = fromMetaElement(string(val))
+ if name != "" {
+ needPragma = doNeedPragma
+ }
+
+ case "charset":
+ name = string(val)
+ needPragma = doNotNeedPragma
+ }
+ }
+
+ if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
+ continue
+ }
+
+ if strings.HasPrefix(name, "utf-16") {
+ name = "utf-8"
+ }
+
+ return name
+ }
+ }
+}
+
+func fromMetaElement(s string) string {
+ for s != "" {
+ csLoc := strings.Index(s, "charset")
+ if csLoc == -1 {
+ return ""
+ }
+ s = s[csLoc+len("charset"):]
+ s = strings.TrimLeft(s, " \t\n\f\r")
+ if !strings.HasPrefix(s, "=") {
+ continue
+ }
+ s = s[1:]
+ s = strings.TrimLeft(s, " \t\n\f\r")
+ if s == "" {
+ return ""
+ }
+ if q := s[0]; q == '"' || q == '\'' {
+ s = s[1:]
+ closeQuote := strings.IndexRune(s, rune(q))
+ if closeQuote == -1 {
+ return ""
+ }
+ return s[:closeQuote]
+ }
+
+ end := strings.IndexAny(s, "; \t\n\f\r")
+ if end == -1 {
+ end = len(s)
+ }
+ return s[:end]
+ }
+ return ""
+}
+
+func xmlEncoding(s string) string {
+ param := "encoding="
+ idx := strings.Index(s, param)
+ if idx == -1 {
+ return ""
+ }
+ v := s[idx+len(param):]
+ if v == "" {
+ return ""
+ }
+ if v[0] != '\'' && v[0] != '"' {
+ return ""
+ }
+ idx = strings.IndexRune(v[1:], rune(v[0]))
+ if idx == -1 {
+ return ""
+ }
+ return v[1 : idx+1]
+}
+
+// trimLWS trims whitespace from beginning of the input.
+// TODO: find a way to call trimLWS once per detection instead of once in each
+// detector which needs the trimmed input.
+func trimLWS(in []byte) []byte {
+ firstNonWS := 0
+ for ; firstNonWS < len(in) && isWS(in[firstNonWS]); firstNonWS++ {
+ }
+
+ return in[firstNonWS:]
+}
+
+func isWS(b byte) bool {
+ return b == '\t' || b == '\n' || b == '\x0c' || b == '\r' || b == ' '
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go b/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go
new file mode 100644
index 0000000..5b2ecee
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go
@@ -0,0 +1,567 @@
+// Copyright (c) 2009 The Go Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package json provides a JSON value parser state machine.
+// This package is almost entirely copied from the Go stdlib.
+// Changes made to it permit users of the package to tell
+// if some slice of bytes is a valid beginning of a json string.
+package json
+
+import (
+ "fmt"
+ "sync"
+)
+
+type (
+ scanStatus int
+)
+
+const (
+ parseObjectKey = iota // parsing object key (before colon)
+ parseObjectValue // parsing object value (after colon)
+ parseArrayValue // parsing array value
+
+ scanContinue scanStatus = iota // uninteresting byte
+ scanBeginLiteral // end implied by next result != scanContinue
+ scanBeginObject // begin object
+ scanObjectKey // just finished object key (string)
+ scanObjectValue // just finished non-last object value
+ scanEndObject // end object (implies scanObjectValue if possible)
+ scanBeginArray // begin array
+ scanArrayValue // just finished array value
+ scanEndArray // end array (implies scanArrayValue if possible)
+ scanSkipSpace // space byte; can skip; known to be last "continue" result
+ scanEnd // top-level value ended *before* this byte; known to be first "stop" result
+ scanError // hit an error, scanner.err.
+
+ // This limits the max nesting depth to prevent stack overflow.
+ // This is permitted by https://tools.ietf.org/html/rfc7159#section-9
+ maxNestingDepth = 10000
+)
+
+type (
+ scanner struct {
+ step func(*scanner, byte) scanStatus
+ parseState []int
+ endTop bool
+ err error
+ index int
+ }
+)
+
+var scannerPool = sync.Pool{
+ New: func() any {
+ return &scanner{}
+ },
+}
+
+func newScanner() *scanner {
+ s := scannerPool.Get().(*scanner)
+ s.reset()
+ return s
+}
+
+func freeScanner(s *scanner) {
+ // Avoid hanging on to too much memory in extreme cases.
+ if len(s.parseState) > 1024 {
+ s.parseState = nil
+ }
+ scannerPool.Put(s)
+}
+
+// Scan returns the number of bytes scanned and the error, if any, encountered
+// while trying to reach the end of data.
+func Scan(data []byte) (int, error) {
+ s := newScanner()
+ defer freeScanner(s)
+ _ = checkValid(data, s)
+ return s.index, s.err
+}
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+ for _, c := range data {
+ scan.index++
+ if scan.step(scan, c) == scanError {
+ return scan.err
+ }
+ }
+ if scan.eof() == scanError {
+ return scan.err
+ }
+ return nil
+}
+
+func isSpace(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+func (s *scanner) reset() {
+ s.step = stateBeginValue
+ s.parseState = s.parseState[0:0]
+ s.err = nil
+ s.endTop = false
+ s.index = 0
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() scanStatus {
+ if s.err != nil {
+ return scanError
+ }
+ if s.endTop {
+ return scanEnd
+ }
+ s.step(s, ' ')
+ if s.endTop {
+ return scanEnd
+ }
+ if s.err == nil {
+ s.err = fmt.Errorf("unexpected end of JSON input")
+ }
+ return scanError
+}
+
+// pushParseState pushes newParseState onto the parse stack.
+// An error state is returned if maxNestingDepth was exceeded; otherwise successState is returned.
+func (s *scanner) pushParseState(c byte, newParseState int, successState scanStatus) scanStatus {
+ s.parseState = append(s.parseState, newParseState)
+ if len(s.parseState) <= maxNestingDepth {
+ return successState
+ }
+ return s.error(c, "exceeded max depth")
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+ n := len(s.parseState) - 1
+ s.parseState = s.parseState[0:n]
+ if n == 0 {
+ s.step = stateEndTop
+ s.endTop = true
+ } else {
+ s.step = stateEndValue
+ }
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c byte) scanStatus {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == ']' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c byte) scanStatus {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ switch c {
+ case '{':
+ s.step = stateBeginStringOrEmpty
+ return s.pushParseState(c, parseObjectKey, scanBeginObject)
+ case '[':
+ s.step = stateBeginValueOrEmpty
+ return s.pushParseState(c, parseArrayValue, scanBeginArray)
+ case '"':
+ s.step = stateInString
+ return scanBeginLiteral
+ case '-':
+ s.step = stateNeg
+ return scanBeginLiteral
+ case '0': // beginning of 0.123
+ s.step = state0
+ return scanBeginLiteral
+ case 't': // beginning of true
+ s.step = stateT
+ return scanBeginLiteral
+ case 'f': // beginning of false
+ s.step = stateF
+ return scanBeginLiteral
+ case 'n': // beginning of null
+ s.step = stateN
+ return scanBeginLiteral
+ }
+ if '1' <= c && c <= '9' { // beginning of 1234.5
+ s.step = state1
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of value")
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c byte) scanStatus {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '}' {
+ n := len(s.parseState)
+ s.parseState[n-1] = parseObjectValue
+ return stateEndValue(s, c)
+ }
+ return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c byte) scanStatus {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '"' {
+ s.step = stateInString
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c byte) scanStatus {
+ n := len(s.parseState)
+ if n == 0 {
+ // Completed top-level before the current byte.
+ s.step = stateEndTop
+ s.endTop = true
+ return stateEndTop(s, c)
+ }
+ if c <= ' ' && isSpace(c) {
+ s.step = stateEndValue
+ return scanSkipSpace
+ }
+ ps := s.parseState[n-1]
+ switch ps {
+ case parseObjectKey:
+ if c == ':' {
+ s.parseState[n-1] = parseObjectValue
+ s.step = stateBeginValue
+ return scanObjectKey
+ }
+ return s.error(c, "after object key")
+ case parseObjectValue:
+ if c == ',' {
+ s.parseState[n-1] = parseObjectKey
+ s.step = stateBeginString
+ return scanObjectValue
+ }
+ if c == '}' {
+ s.popParseState()
+ return scanEndObject
+ }
+ return s.error(c, "after object key:value pair")
+ case parseArrayValue:
+ if c == ',' {
+ s.step = stateBeginValue
+ return scanArrayValue
+ }
+ if c == ']' {
+ s.popParseState()
+ return scanEndArray
+ }
+ return s.error(c, "after array element")
+ }
+ return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c byte) scanStatus {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ // Complain about non-space byte on next call.
+ s.error(c, "after top-level value")
+ }
+ return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c byte) scanStatus {
+ if c == '"' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ if c == '\\' {
+ s.step = stateInStringEsc
+ return scanContinue
+ }
+ if c < 0x20 {
+ return s.error(c, "in string literal")
+ }
+ return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c byte) scanStatus {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+ s.step = stateInString
+ return scanContinue
+ case 'u':
+ s.step = stateInStringEscU
+ return scanContinue
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU1
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU12
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU123
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInString
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c byte) scanStatus {
+ if c == '0' {
+ s.step = state0
+ return scanContinue
+ }
+ if '1' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c byte) scanStatus {
+ if c == '.' {
+ s.step = stateDot
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ s.step = stateDot0
+ return scanContinue
+ }
+ return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c byte) scanStatus {
+ if c == '+' || c == '-' {
+ s.step = stateESign
+ return scanContinue
+ }
+ return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ s.step = stateE0
+ return scanContinue
+ }
+ return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c byte) scanStatus {
+ if c == 'r' {
+ s.step = stateTr
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c byte) scanStatus {
+ if c == 'u' {
+ s.step = stateTru
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c byte) scanStatus {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c byte) scanStatus {
+ if c == 'a' {
+ s.step = stateFa
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c byte) scanStatus {
+ if c == 'l' {
+ s.step = stateFal
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c byte) scanStatus {
+ if c == 's' {
+ s.step = stateFals
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c byte) scanStatus {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c byte) scanStatus {
+ if c == 'u' {
+ s.step = stateNu
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c byte) scanStatus {
+ if c == 'l' {
+ s.step = stateNul
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c byte) scanStatus {
+ if c == 'l' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c byte) scanStatus {
+ return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c byte, context string) scanStatus {
+ s.step = stateError
+ s.err = fmt.Errorf("invalid character <<%c>> %s", c, context)
+ return scanError
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go
new file mode 100644
index 0000000..068d00f
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go
@@ -0,0 +1,163 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+var (
+ // SevenZ matches a 7z archive.
+ SevenZ = prefix([]byte{0x37, 0x7A, 0xBC, 0xAF, 0x27, 0x1C})
+ // Gzip matches gzip files based on http://www.zlib.org/rfc-gzip.html#header-trailer.
+ Gzip = prefix([]byte{0x1f, 0x8b})
+	// Fits matches a Flexible Image Transport System file.
+ Fits = prefix([]byte{
+ 0x53, 0x49, 0x4D, 0x50, 0x4C, 0x45, 0x20, 0x20, 0x3D, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x54,
+ })
+ // Xar matches an eXtensible ARchive format file.
+ Xar = prefix([]byte{0x78, 0x61, 0x72, 0x21})
+ // Bz2 matches a bzip2 file.
+ Bz2 = prefix([]byte{0x42, 0x5A, 0x68})
+ // Ar matches an ar (Unix) archive file.
+ Ar = prefix([]byte{0x21, 0x3C, 0x61, 0x72, 0x63, 0x68, 0x3E})
+ // Deb matches a Debian package file.
+ Deb = offset([]byte{
+ 0x64, 0x65, 0x62, 0x69, 0x61, 0x6E, 0x2D,
+ 0x62, 0x69, 0x6E, 0x61, 0x72, 0x79,
+ }, 8)
+ // Warc matches a Web ARChive file.
+ Warc = prefix([]byte("WARC/1.0"), []byte("WARC/1.1"))
+ // Cab matches a Microsoft Cabinet archive file.
+ Cab = prefix([]byte("MSCF\x00\x00\x00\x00"))
+ // Xz matches an xz compressed stream based on https://tukaani.org/xz/xz-file-format.txt.
+ Xz = prefix([]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00})
+ // Lzip matches an Lzip compressed file.
+ Lzip = prefix([]byte{0x4c, 0x5a, 0x49, 0x50})
+ // RPM matches an RPM or Delta RPM package file.
+ RPM = prefix([]byte{0xed, 0xab, 0xee, 0xdb}, []byte("drpm"))
+ // Cpio matches a cpio archive file.
+ Cpio = prefix([]byte("070707"), []byte("070701"), []byte("070702"))
+ // RAR matches a RAR archive file.
+ RAR = prefix([]byte("Rar!\x1A\x07\x00"), []byte("Rar!\x1A\x07\x01\x00"))
+)
+
+// InstallShieldCab matches an InstallShield Cabinet archive file.
+func InstallShieldCab(raw []byte, _ uint32) bool {
+ return len(raw) > 7 &&
+ bytes.Equal(raw[0:4], []byte("ISc(")) &&
+ raw[6] == 0 &&
+ (raw[7] == 1 || raw[7] == 2 || raw[7] == 4)
+}
+
+// Zstd matches a Zstandard archive file.
+// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md
+func Zstd(raw []byte, limit uint32) bool {
+ if len(raw) < 4 {
+ return false
+ }
+ sig := binary.LittleEndian.Uint32(raw)
+ // Check for Zstandard frames and skippable frames.
+ return (sig >= 0xFD2FB522 && sig <= 0xFD2FB528) ||
+ (sig >= 0x184D2A50 && sig <= 0x184D2A5F)
+}
+
+// CRX matches a Chrome extension file: a zip archive prepended by a package header.
+func CRX(raw []byte, limit uint32) bool {
+ const minHeaderLen = 16
+ if len(raw) < minHeaderLen || !bytes.HasPrefix(raw, []byte("Cr24")) {
+ return false
+ }
+ pubkeyLen := binary.LittleEndian.Uint32(raw[8:12])
+ sigLen := binary.LittleEndian.Uint32(raw[12:16])
+ zipOffset := minHeaderLen + pubkeyLen + sigLen
+ if uint32(len(raw)) < zipOffset {
+ return false
+ }
+ return Zip(raw[zipOffset:], limit)
+}
+
+// Tar matches a (t)ape (ar)chive file.
+// Tar files are divided into 512-byte records. The first record contains a
+// 257-byte header padded with NUL.
+func Tar(raw []byte, _ uint32) bool {
+ const sizeRecord = 512
+
+ // The structure of a tar header:
+ // type TarHeader struct {
+ // Name [100]byte
+ // Mode [8]byte
+ // Uid [8]byte
+ // Gid [8]byte
+ // Size [12]byte
+ // Mtime [12]byte
+ // Chksum [8]byte
+ // Linkflag byte
+ // Linkname [100]byte
+ // Magic [8]byte
+ // Uname [32]byte
+ // Gname [32]byte
+ // Devmajor [8]byte
+ // Devminor [8]byte
+ // }
+
+ if len(raw) < sizeRecord {
+ return false
+ }
+ raw = raw[:sizeRecord]
+
+ // First 100 bytes of the header represent the file name.
+ // Check if file looks like Gentoo GLEP binary package.
+ if bytes.Contains(raw[:100], []byte("/gpkg-1\x00")) {
+ return false
+ }
+
+ // Get the checksum recorded into the file.
+ recsum := tarParseOctal(raw[148:156])
+ if recsum == -1 {
+ return false
+ }
+ sum1, sum2 := tarChksum(raw)
+ return recsum == sum1 || recsum == sum2
+}
+
+// tarParseOctal converts an octal string to a decimal int64.
+func tarParseOctal(b []byte) int64 {
+ // Because unused fields are filled with NULs, we need to skip leading NULs.
+ // Fields may also be padded with spaces or NULs.
+ // So we remove leading and trailing NULs and spaces to be sure.
+ b = bytes.Trim(b, " \x00")
+
+ if len(b) == 0 {
+ return -1
+ }
+ ret := int64(0)
+ for _, b := range b {
+ if b == 0 {
+ break
+ }
+ if !(b >= '0' && b <= '7') {
+ return -1
+ }
+ ret = (ret << 3) | int64(b-'0')
+ }
+ return ret
+}
+
+// tarChksum computes the checksum for the header block b.
+// The actual checksum is written to the same b block after it has been calculated.
+// Before calculation the bytes from b reserved for checksum have placeholder
+// value of ASCII space 0x20.
+// POSIX specifies a sum of the unsigned byte values, but the Sun tar used
+// signed byte values. We compute and return both.
+func tarChksum(b []byte) (unsigned, signed int64) {
+ for i, c := range b {
+ if 148 <= i && i < 156 {
+ c = ' ' // Treat the checksum field itself as all spaces.
+ }
+ unsigned += int64(c)
+ signed += int64(int8(c))
+ }
+ return unsigned, signed
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go
new file mode 100644
index 0000000..d17e324
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go
@@ -0,0 +1,76 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+var (
+ // Flac matches a Free Lossless Audio Codec file.
+ Flac = prefix([]byte("\x66\x4C\x61\x43\x00\x00\x00\x22"))
+ // Midi matches a Musical Instrument Digital Interface file.
+ Midi = prefix([]byte("\x4D\x54\x68\x64"))
+ // Ape matches a Monkey's Audio file.
+ Ape = prefix([]byte("\x4D\x41\x43\x20\x96\x0F\x00\x00\x34\x00\x00\x00\x18\x00\x00\x00\x90\xE3"))
+ // MusePack matches a Musepack file.
+ MusePack = prefix([]byte("MPCK"))
+ // Au matches a Sun Microsystems au file.
+ Au = prefix([]byte("\x2E\x73\x6E\x64"))
+ // Amr matches an Adaptive Multi-Rate file.
+ Amr = prefix([]byte("\x23\x21\x41\x4D\x52"))
+ // Voc matches a Creative Voice file.
+ Voc = prefix([]byte("Creative Voice File"))
+ // M3u matches a Playlist file.
+ M3u = prefix([]byte("#EXTM3U"))
+ // AAC matches an Advanced Audio Coding file.
+ AAC = prefix([]byte{0xFF, 0xF1}, []byte{0xFF, 0xF9})
+)
+
+// Mp3 matches an mp3 file.
+func Mp3(raw []byte, limit uint32) bool {
+ if len(raw) < 3 {
+ return false
+ }
+
+ if bytes.HasPrefix(raw, []byte("ID3")) {
+		// MP3s with an ID3v2 tag will start with "ID3".
+		// ID3v1 tags, however, appear at the end of the file.
+ return true
+ }
+
+ // Match MP3 files without tags
+ switch binary.BigEndian.Uint16(raw[:2]) & 0xFFFE {
+ case 0xFFFA:
+ // MPEG ADTS, layer III, v1
+ return true
+ case 0xFFF2:
+ // MPEG ADTS, layer III, v2
+ return true
+ case 0xFFE2:
+ // MPEG ADTS, layer III, v2.5
+ return true
+ }
+
+ return false
+}
+
+// Wav matches a Waveform Audio File Format file.
+func Wav(raw []byte, limit uint32) bool {
+ return len(raw) > 12 &&
+ bytes.Equal(raw[:4], []byte("RIFF")) &&
+ bytes.Equal(raw[8:12], []byte{0x57, 0x41, 0x56, 0x45})
+}
+
+// Aiff matches an Audio Interchange File Format file.
+func Aiff(raw []byte, limit uint32) bool {
+ return len(raw) > 12 &&
+ bytes.Equal(raw[:4], []byte{0x46, 0x4F, 0x52, 0x4D}) &&
+ bytes.Equal(raw[8:12], []byte{0x41, 0x49, 0x46, 0x46})
+}
+
+// Qcp matches a Qualcomm Pure Voice file.
+func Qcp(raw []byte, limit uint32) bool {
+ return len(raw) > 12 &&
+ bytes.Equal(raw[:4], []byte("RIFF")) &&
+ bytes.Equal(raw[8:12], []byte("QLCM"))
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go
new file mode 100644
index 0000000..7697320
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go
@@ -0,0 +1,203 @@
+package magic
+
+import (
+ "bytes"
+ "debug/macho"
+ "encoding/binary"
+)
+
+var (
+ // Lnk matches Microsoft lnk binary format.
+ Lnk = prefix([]byte{0x4C, 0x00, 0x00, 0x00, 0x01, 0x14, 0x02, 0x00})
+	// Wasm matches a WebAssembly binary format file.
+ Wasm = prefix([]byte{0x00, 0x61, 0x73, 0x6D})
+ // Exe matches a Windows/DOS executable file.
+ Exe = prefix([]byte{0x4D, 0x5A})
+ // Elf matches an Executable and Linkable Format file.
+ Elf = prefix([]byte{0x7F, 0x45, 0x4C, 0x46})
+	// Nes matches a Nintendo Entertainment System ROM file.
+ Nes = prefix([]byte{0x4E, 0x45, 0x53, 0x1A})
+ // SWF matches an Adobe Flash swf file.
+ SWF = prefix([]byte("CWS"), []byte("FWS"), []byte("ZWS"))
+	// Torrent has bencoded text at the beginning.
+ Torrent = prefix([]byte("d8:announce"))
+	// Par1 matches a Parquet file.
+ Par1 = prefix([]byte{0x50, 0x41, 0x52, 0x31})
+ // CBOR matches a Concise Binary Object Representation https://cbor.io/
+ CBOR = prefix([]byte{0xD9, 0xD9, 0xF7})
+)
+
+// Java bytecode and Mach-O binaries share the same magic number.
+// More info here https://github.com/threatstack/libmagic/blob/master/magic/Magdir/cafebabe
+func classOrMachOFat(in []byte) bool {
+	// There should be at least 8 bytes for both of them because the only way
+	// to quickly distinguish them is by comparing the byte at position 7.
+ if len(in) < 8 {
+ return false
+ }
+
+ return binary.BigEndian.Uint32(in) == macho.MagicFat
+}
+
+// Class matches a java class file.
+func Class(raw []byte, limit uint32) bool {
+ return classOrMachOFat(raw) && raw[7] > 30
+}
+
+// MachO matches the Mach-O binary format.
+func MachO(raw []byte, limit uint32) bool {
+ if classOrMachOFat(raw) && raw[7] < 0x14 {
+ return true
+ }
+
+ if len(raw) < 4 {
+ return false
+ }
+
+ be := binary.BigEndian.Uint32(raw)
+ le := binary.LittleEndian.Uint32(raw)
+
+ return be == macho.Magic32 ||
+ le == macho.Magic32 ||
+ be == macho.Magic64 ||
+ le == macho.Magic64
+}
+
+// Dbf matches a dBase file.
+// https://www.dbase.com/Knowledgebase/INT/db7_file_fmt.htm
+func Dbf(raw []byte, limit uint32) bool {
+ if len(raw) < 68 {
+ return false
+ }
+
+ // 3rd and 4th bytes contain the last update month and day of month.
+ if !(0 < raw[2] && raw[2] < 13 && 0 < raw[3] && raw[3] < 32) {
+ return false
+ }
+
+ // 12, 13, 30, 31 are reserved bytes and always filled with 0x00.
+ if raw[12] != 0x00 || raw[13] != 0x00 || raw[30] != 0x00 || raw[31] != 0x00 {
+ return false
+ }
+ // Production MDX flag;
+ // 0x01 if a production .MDX file exists for this table;
+ // 0x00 if no .MDX file exists.
+ if raw[28] > 0x01 {
+ return false
+ }
+
+ // dbf type is dictated by the first byte.
+ dbfTypes := []byte{
+ 0x02, 0x03, 0x04, 0x05, 0x30, 0x31, 0x32, 0x42, 0x62, 0x7B, 0x82,
+ 0x83, 0x87, 0x8A, 0x8B, 0x8E, 0xB3, 0xCB, 0xE5, 0xF5, 0xF4, 0xFB,
+ }
+ for _, b := range dbfTypes {
+ if raw[0] == b {
+ return true
+ }
+ }
+
+ return false
+}
+
+// ElfObj matches an object file.
+func ElfObj(raw []byte, limit uint32) bool {
+ return len(raw) > 17 && ((raw[16] == 0x01 && raw[17] == 0x00) ||
+ (raw[16] == 0x00 && raw[17] == 0x01))
+}
+
+// ElfExe matches an executable file.
+func ElfExe(raw []byte, limit uint32) bool {
+ return len(raw) > 17 && ((raw[16] == 0x02 && raw[17] == 0x00) ||
+ (raw[16] == 0x00 && raw[17] == 0x02))
+}
+
+// ElfLib matches a shared library file.
+func ElfLib(raw []byte, limit uint32) bool {
+ return len(raw) > 17 && ((raw[16] == 0x03 && raw[17] == 0x00) ||
+ (raw[16] == 0x00 && raw[17] == 0x03))
+}
+
+// ElfDump matches a core dump file.
+func ElfDump(raw []byte, limit uint32) bool {
+ return len(raw) > 17 && ((raw[16] == 0x04 && raw[17] == 0x00) ||
+ (raw[16] == 0x00 && raw[17] == 0x04))
+}
+
+// Dcm matches a DICOM medical format file.
+func Dcm(raw []byte, limit uint32) bool {
+ return len(raw) > 131 &&
+ bytes.Equal(raw[128:132], []byte{0x44, 0x49, 0x43, 0x4D})
+}
+
+// Marc matches a MARC21 (MAchine-Readable Cataloging) file.
+func Marc(raw []byte, limit uint32) bool {
+ // File is at least 24 bytes ("leader" field size).
+ if len(raw) < 24 {
+ return false
+ }
+
+ // Fixed bytes at offset 20.
+ if !bytes.Equal(raw[20:24], []byte("4500")) {
+ return false
+ }
+
+ // First 5 bytes are ASCII digits.
+ for i := 0; i < 5; i++ {
+ if raw[i] < '0' || raw[i] > '9' {
+ return false
+ }
+ }
+
+ // Field terminator is present in first 2048 bytes.
+ return bytes.Contains(raw[:min(2048, len(raw))], []byte{0x1E})
+}
+
+// Glb matches a glTF model format file.
+// GLB is the binary file format representation of 3D models saved in
+// the GL transmission Format (glTF).
+// GLB uses little endian and its header structure is as follows:
+//
+// <-- 12-byte header -->
+// | magic | version | length |
+// | (uint32) | (uint32) | (uint32) |
+// | \x67\x6C\x54\x46 | \x01\x00\x00\x00 | ... |
+// | g l T F | 1 | ... |
+//
+// Visit [glTF specification] and [IANA glTF entry] for more details.
+//
+// [glTF specification]: https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html
+// [IANA glTF entry]: https://www.iana.org/assignments/media-types/model/gltf-binary
+var Glb = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"),
+ []byte("\x67\x6C\x54\x46\x01\x00\x00\x00"))
+
+// TzIf matches a Time Zone Information Format (TZif) file.
+// See more: https://tools.ietf.org/id/draft-murchison-tzdist-tzif-00.html#rfc.section.3
+// Its header structure is shown below:
+//
+// +---------------+---+
+// | magic (4) | <-+-- version (1)
+// +---------------+---+---------------------------------------+
+// | [unused - reserved for future use] (15) |
+// +---------------+---------------+---------------+-----------+
+// | isutccnt (4) | isstdcnt (4) | leapcnt (4) |
+// +---------------+---------------+---------------+
+// | timecnt (4) | typecnt (4) | charcnt (4) |
+func TzIf(raw []byte, limit uint32) bool {
+ // File is at least 44 bytes (header size).
+ if len(raw) < 44 {
+ return false
+ }
+
+ if !bytes.HasPrefix(raw, []byte("TZif")) {
+ return false
+ }
+
+ // Field "typecnt" MUST not be zero.
+ if binary.BigEndian.Uint32(raw[36:40]) == 0 {
+ return false
+ }
+
+ // Version has to be NUL (0x00), '2' (0x32) or '3' (0x33).
+ return raw[4] == 0x00 || raw[4] == 0x32 || raw[4] == 0x33
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go
new file mode 100644
index 0000000..cb1fed1
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go
@@ -0,0 +1,13 @@
+package magic
+
+var (
+ // Sqlite matches an SQLite database file.
+ Sqlite = prefix([]byte{
+ 0x53, 0x51, 0x4c, 0x69, 0x74, 0x65, 0x20, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x20, 0x33, 0x00,
+ })
+ // MsAccessAce matches Microsoft Access dababase file.
+ MsAccessAce = offset([]byte("Standard ACE DB"), 4)
+ // MsAccessMdb matches legacy Microsoft Access database file (JET, 2003 and earlier).
+ MsAccessMdb = offset([]byte("Standard Jet DB"), 4)
+)
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go
new file mode 100644
index 0000000..b3b26d5
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go
@@ -0,0 +1,62 @@
+package magic
+
+import "bytes"
+
+var (
+ // Pdf matches a Portable Document Format file.
+ // https://github.com/file/file/blob/11010cc805546a3e35597e67e1129a481aed40e8/magic/Magdir/pdf
+ Pdf = prefix(
+ // usual pdf signature
+ []byte("%PDF-"),
+ // new-line prefixed signature
+ []byte("\012%PDF-"),
+ // UTF-8 BOM prefixed signature
+ []byte("\xef\xbb\xbf%PDF-"),
+ )
+ // Fdf matches a Forms Data Format file.
+ Fdf = prefix([]byte("%FDF"))
+ // Mobi matches a Mobi file.
+ Mobi = offset([]byte("BOOKMOBI"), 60)
+ // Lit matches a Microsoft Lit file.
+ Lit = prefix([]byte("ITOLITLS"))
+)
+
+// DjVu matches a DjVu file.
+func DjVu(raw []byte, limit uint32) bool {
+ if len(raw) < 12 {
+ return false
+ }
+ if !bytes.HasPrefix(raw, []byte{0x41, 0x54, 0x26, 0x54, 0x46, 0x4F, 0x52, 0x4D}) {
+ return false
+ }
+ return bytes.HasPrefix(raw[12:], []byte("DJVM")) ||
+ bytes.HasPrefix(raw[12:], []byte("DJVU")) ||
+ bytes.HasPrefix(raw[12:], []byte("DJVI")) ||
+ bytes.HasPrefix(raw[12:], []byte("THUM"))
+}
+
+// P7s matches a .p7s signature file (PEM, Base64).
+func P7s(raw []byte, limit uint32) bool {
+ // Check for PEM Encoding.
+ if bytes.HasPrefix(raw, []byte("-----BEGIN PKCS7")) {
+ return true
+ }
+ // Check if DER Encoding is long enough.
+ if len(raw) < 20 {
+ return false
+ }
+ // Magic Bytes for the signedData ASN.1 encoding.
+ startHeader := [][]byte{{0x30, 0x80}, {0x30, 0x81}, {0x30, 0x82}, {0x30, 0x83}, {0x30, 0x84}}
+ signedDataMatch := []byte{0x06, 0x09, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07}
+ // Check if Header is correct. There are multiple valid headers.
+ for i, match := range startHeader {
+ // If first bytes match, then check for ASN.1 Object Type.
+ if bytes.HasPrefix(raw, match) {
+ if bytes.HasPrefix(raw[i+2:], signedDataMatch) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go
new file mode 100644
index 0000000..43af282
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go
@@ -0,0 +1,39 @@
+package magic
+
+import (
+ "bytes"
+)
+
+var (
+ // Woff matches a Web Open Font Format file.
+ Woff = prefix([]byte("wOFF"))
+ // Woff2 matches a Web Open Font Format version 2 file.
+ Woff2 = prefix([]byte("wOF2"))
+ // Otf matches an OpenType font file.
+ Otf = prefix([]byte{0x4F, 0x54, 0x54, 0x4F, 0x00})
+)
+
+// Ttf matches a TrueType font file.
+func Ttf(raw []byte, limit uint32) bool {
+ if !bytes.HasPrefix(raw, []byte{0x00, 0x01, 0x00, 0x00}) {
+ return false
+ }
+ return !MsAccessAce(raw, limit) && !MsAccessMdb(raw, limit)
+}
+
+// Eot matches an Embedded OpenType font file.
+func Eot(raw []byte, limit uint32) bool {
+ return len(raw) > 35 &&
+ bytes.Equal(raw[34:36], []byte{0x4C, 0x50}) &&
+ (bytes.Equal(raw[8:11], []byte{0x02, 0x00, 0x01}) ||
+ bytes.Equal(raw[8:11], []byte{0x01, 0x00, 0x00}) ||
+ bytes.Equal(raw[8:11], []byte{0x02, 0x00, 0x02}))
+}
+
+// Ttc matches a TrueType Collection font file.
+func Ttc(raw []byte, limit uint32) bool {
+ return len(raw) > 7 &&
+ bytes.HasPrefix(raw, []byte("ttcf")) &&
+ (bytes.Equal(raw[4:8], []byte{0x00, 0x01, 0x00, 0x00}) ||
+ bytes.Equal(raw[4:8], []byte{0x00, 0x02, 0x00, 0x00}))
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go
new file mode 100644
index 0000000..ac72713
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go
@@ -0,0 +1,109 @@
+package magic
+
+import (
+ "bytes"
+)
+
+var (
+ // AVIF matches an AV1 Image File Format still or animated.
+	// The Wikipedia page seems outdated, listing image/avif-sequence for animations.
+ // https://github.com/AOMediaCodec/av1-avif/issues/59
+ AVIF = ftyp([]byte("avif"), []byte("avis"))
+ // ThreeGP matches a 3GPP file.
+ ThreeGP = ftyp(
+ []byte("3gp1"), []byte("3gp2"), []byte("3gp3"), []byte("3gp4"),
+ []byte("3gp5"), []byte("3gp6"), []byte("3gp7"), []byte("3gs7"),
+ []byte("3ge6"), []byte("3ge7"), []byte("3gg6"),
+ )
+ // ThreeG2 matches a 3GPP2 file.
+ ThreeG2 = ftyp(
+ []byte("3g24"), []byte("3g25"), []byte("3g26"), []byte("3g2a"),
+ []byte("3g2b"), []byte("3g2c"), []byte("KDDI"),
+ )
+ // AMp4 matches an audio MP4 file.
+ AMp4 = ftyp(
+ // audio for Adobe Flash Player 9+
+ []byte("F4A "), []byte("F4B "),
+ // Apple iTunes AAC-LC (.M4A) Audio
+ []byte("M4B "), []byte("M4P "),
+ // MPEG-4 (.MP4) for SonyPSP
+ []byte("MSNV"),
+ // Nero Digital AAC Audio
+ []byte("NDAS"),
+ )
+ // Mqv matches a Sony / Mobile QuickTime file.
+ Mqv = ftyp([]byte("mqt "))
+ // M4a matches an audio M4A file.
+ M4a = ftyp([]byte("M4A "))
+	// M4v matches an Apple M4V video file.
+ M4v = ftyp([]byte("M4V "), []byte("M4VH"), []byte("M4VP"))
+ // Heic matches a High Efficiency Image Coding (HEIC) file.
+ Heic = ftyp([]byte("heic"), []byte("heix"))
+ // HeicSequence matches a High Efficiency Image Coding (HEIC) file sequence.
+ HeicSequence = ftyp([]byte("hevc"), []byte("hevx"))
+ // Heif matches a High Efficiency Image File Format (HEIF) file.
+ Heif = ftyp([]byte("mif1"), []byte("heim"), []byte("heis"), []byte("avic"))
+ // HeifSequence matches a High Efficiency Image File Format (HEIF) file sequence.
+ HeifSequence = ftyp([]byte("msf1"), []byte("hevm"), []byte("hevs"), []byte("avcs"))
+ // Mj2 matches a Motion JPEG 2000 file: https://en.wikipedia.org/wiki/Motion_JPEG_2000.
+ Mj2 = ftyp([]byte("mj2s"), []byte("mjp2"), []byte("MFSM"), []byte("MGSV"))
+ // Dvb matches a Digital Video Broadcasting file: https://dvb.org.
+ // https://cconcolato.github.io/mp4ra/filetype.html
+ // https://github.com/file/file/blob/512840337ead1076519332d24fefcaa8fac36e06/magic/Magdir/animation#L135-L154
+ Dvb = ftyp(
+ []byte("dby1"), []byte("dsms"), []byte("dts1"), []byte("dts2"),
+ []byte("dts3"), []byte("dxo "), []byte("dmb1"), []byte("dmpf"),
+ []byte("drc1"), []byte("dv1a"), []byte("dv1b"), []byte("dv2a"),
+ []byte("dv2b"), []byte("dv3a"), []byte("dv3b"), []byte("dvr1"),
+ []byte("dvt1"), []byte("emsg"))
+ // TODO: add support for remaining video formats at ftyps.com.
+)
+
+// QuickTime matches a QuickTime File Format file.
+// https://www.loc.gov/preservation/digital/formats/fdd/fdd000052.shtml
+// https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap1/qtff1.html#//apple_ref/doc/uid/TP40000939-CH203-38190
+// https://github.com/apache/tika/blob/0f5570691133c75ac4472c3340354a6c4080b104/tika-core/src/main/resources/org/apache/tika/mime/tika-mimetypes.xml#L7758-L7777
+func QuickTime(raw []byte, _ uint32) bool {
+ if len(raw) < 12 {
+ return false
+ }
+ // First 4 bytes represent the size of the atom as unsigned int.
+ // Next 4 bytes are the type of the atom.
+ // For `ftyp` atoms check if first byte in size is 0, otherwise, a text file
+ // which happens to contain 'ftypqt ' at index 4 will trigger a false positive.
+ if bytes.Equal(raw[4:12], []byte("ftypqt ")) ||
+ bytes.Equal(raw[4:12], []byte("ftypmoov")) {
+ return raw[0] == 0x00
+ }
+ basicAtomTypes := [][]byte{
+ []byte("moov\x00"),
+ []byte("mdat\x00"),
+ []byte("free\x00"),
+ []byte("skip\x00"),
+ []byte("pnot\x00"),
+ }
+ for _, a := range basicAtomTypes {
+ if bytes.Equal(raw[4:9], a) {
+ return true
+ }
+ }
+ return bytes.Equal(raw[:8], []byte("\x00\x00\x00\x08wide"))
+}
+
+// Mp4 detects an .mp4 file. Mp4 detection only does a basic ftyp check.
+// Mp4 has many registered and unregistered code points, so it is hard to keep
+// track of them all. Detection defaults to video/mp4 for all ftyp files.
+// ISO_IEC_14496-12 is the specification for the iso container.
+func Mp4(raw []byte, _ uint32) bool {
+ if len(raw) < 12 {
+ return false
+ }
+ // ftyps are made out of boxes. The first 4 bytes of the box represent
+ // its size in big-endian uint32. First box is the ftyp box and it is small
+ // in size. Check most significant byte is 0 to filter out false positive
+ // text files that happen to contain the string "ftyp" at index 4.
+ if raw[0] != 0 {
+ return false
+ }
+ return bytes.Equal(raw[4:8], []byte("ftyp"))
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go
new file mode 100644
index 0000000..f077e16
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go
@@ -0,0 +1,55 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+// Shp matches a shape format file.
+// https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf
+func Shp(raw []byte, limit uint32) bool {
+ if len(raw) < 112 {
+ return false
+ }
+
+ if !(binary.BigEndian.Uint32(raw[0:4]) == 9994 &&
+ binary.BigEndian.Uint32(raw[4:8]) == 0 &&
+ binary.BigEndian.Uint32(raw[8:12]) == 0 &&
+ binary.BigEndian.Uint32(raw[12:16]) == 0 &&
+ binary.BigEndian.Uint32(raw[16:20]) == 0 &&
+ binary.BigEndian.Uint32(raw[20:24]) == 0 &&
+ binary.LittleEndian.Uint32(raw[28:32]) == 1000) {
+ return false
+ }
+
+ shapeTypes := []int{
+ 0, // Null shape
+ 1, // Point
+ 3, // Polyline
+ 5, // Polygon
+ 8, // MultiPoint
+ 11, // PointZ
+ 13, // PolylineZ
+ 15, // PolygonZ
+ 18, // MultiPointZ
+ 21, // PointM
+ 23, // PolylineM
+ 25, // PolygonM
+ 28, // MultiPointM
+ 31, // MultiPatch
+ }
+
+ for _, st := range shapeTypes {
+ if st == int(binary.LittleEndian.Uint32(raw[108:112])) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Shx matches a shape index format file.
+// https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf
+func Shx(raw []byte, limit uint32) bool {
+ return bytes.HasPrefix(raw, []byte{0x00, 0x00, 0x27, 0x0A})
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go
new file mode 100644
index 0000000..0eb7e95
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go
@@ -0,0 +1,110 @@
+package magic
+
+import "bytes"
+
+var (
+ // Png matches a Portable Network Graphics file.
+ // https://www.w3.org/TR/PNG/
+ Png = prefix([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A})
+ // Apng matches an Animated Portable Network Graphics file.
+ // https://wiki.mozilla.org/APNG_Specification
+ Apng = offset([]byte("acTL"), 37)
+ // Jpg matches a Joint Photographic Experts Group file.
+ Jpg = prefix([]byte{0xFF, 0xD8, 0xFF})
+ // Jp2 matches a JPEG 2000 Image file (ISO 15444-1).
+ Jp2 = jpeg2k([]byte{0x6a, 0x70, 0x32, 0x20})
+ // Jpx matches a JPEG 2000 Image file (ISO 15444-2).
+ Jpx = jpeg2k([]byte{0x6a, 0x70, 0x78, 0x20})
+ // Jpm matches a JPEG 2000 Image file (ISO 15444-6).
+ Jpm = jpeg2k([]byte{0x6a, 0x70, 0x6D, 0x20})
+ // Gif matches a Graphics Interchange Format file.
+ Gif = prefix([]byte("GIF87a"), []byte("GIF89a"))
+ // Bmp matches a bitmap image file.
+ Bmp = prefix([]byte{0x42, 0x4D})
+ // Ps matches a PostScript file.
+ Ps = prefix([]byte("%!PS-Adobe-"))
+ // Psd matches a Photoshop Document file.
+ Psd = prefix([]byte("8BPS"))
+ // Ico matches an ICO file.
+ Ico = prefix([]byte{0x00, 0x00, 0x01, 0x00}, []byte{0x00, 0x00, 0x02, 0x00})
+ // Icns matches an ICNS (Apple Icon Image format) file.
+ Icns = prefix([]byte("icns"))
+ // Tiff matches a Tagged Image File Format file.
+ Tiff = prefix([]byte{0x49, 0x49, 0x2A, 0x00}, []byte{0x4D, 0x4D, 0x00, 0x2A})
+ // Bpg matches a Better Portable Graphics file.
+ Bpg = prefix([]byte{0x42, 0x50, 0x47, 0xFB})
+ // Xcf matches GIMP image data.
+ Xcf = prefix([]byte("gimp xcf"))
+ // Pat matches GIMP pattern data.
+ Pat = offset([]byte("GPAT"), 20)
+ // Gbr matches GIMP brush data.
+ Gbr = offset([]byte("GIMP"), 20)
+ // Hdr matches Radiance HDR image.
+ // https://web.archive.org/web/20060913152809/http://local.wasp.uwa.edu.au/~pbourke/dataformats/pic/
+ Hdr = prefix([]byte("#?RADIANCE\n"))
+ // Xpm matches X PixMap image data.
+ Xpm = prefix([]byte{0x2F, 0x2A, 0x20, 0x58, 0x50, 0x4D, 0x20, 0x2A, 0x2F})
+ // Jxs matches a JPEG XS coded image file (ISO/IEC 21122-3).
+ Jxs = prefix([]byte{0x00, 0x00, 0x00, 0x0C, 0x4A, 0x58, 0x53, 0x20, 0x0D, 0x0A, 0x87, 0x0A})
+ // Jxr matches Microsoft HD JXR photo file.
+ Jxr = prefix([]byte{0x49, 0x49, 0xBC, 0x01})
+)
+
+func jpeg2k(sig []byte) Detector {
+ return func(raw []byte, _ uint32) bool {
+ if len(raw) < 24 {
+ return false
+ }
+
+ if !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x20, 0x20}) &&
+ !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x32, 0x20}) {
+ return false
+ }
+ return bytes.Equal(raw[20:24], sig)
+ }
+}
+
+// Webp matches a WebP file.
+func Webp(raw []byte, _ uint32) bool {
+ return len(raw) > 12 &&
+ bytes.Equal(raw[0:4], []byte("RIFF")) &&
+ bytes.Equal(raw[8:12], []byte{0x57, 0x45, 0x42, 0x50})
+}
+
+// Dwg matches a CAD drawing file.
+func Dwg(raw []byte, _ uint32) bool {
+ if len(raw) < 6 || raw[0] != 0x41 || raw[1] != 0x43 {
+ return false
+ }
+ dwgVersions := [][]byte{
+ {0x31, 0x2E, 0x34, 0x30},
+ {0x31, 0x2E, 0x35, 0x30},
+ {0x32, 0x2E, 0x31, 0x30},
+ {0x31, 0x30, 0x30, 0x32},
+ {0x31, 0x30, 0x30, 0x33},
+ {0x31, 0x30, 0x30, 0x34},
+ {0x31, 0x30, 0x30, 0x36},
+ {0x31, 0x30, 0x30, 0x39},
+ {0x31, 0x30, 0x31, 0x32},
+ {0x31, 0x30, 0x31, 0x34},
+ {0x31, 0x30, 0x31, 0x35},
+ {0x31, 0x30, 0x31, 0x38},
+ {0x31, 0x30, 0x32, 0x31},
+ {0x31, 0x30, 0x32, 0x34},
+ {0x31, 0x30, 0x33, 0x32},
+ }
+
+ for _, d := range dwgVersions {
+ if bytes.Equal(raw[2:6], d) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Jxl matches a JPEG XL image file.
+func Jxl(raw []byte, _ uint32) bool {
+ return bytes.HasPrefix(raw, []byte{0xFF, 0x0A}) ||
+ bytes.HasPrefix(raw, []byte("\x00\x00\x00\x0cJXL\x20\x0d\x0a\x87\x0a"))
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go
new file mode 100644
index 0000000..a34c609
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go
@@ -0,0 +1,251 @@
+// Package magic holds the matching functions used to find MIME types.
+package magic
+
+import (
+ "bytes"
+ "fmt"
+)
+
+type (
+	// Detector receives the raw data of a file and returns whether the data
+	// meets any conditions. The limit parameter is an upper limit to the number
+	// of bytes received and is used to tell if the byte slice represents the
+	// whole file or is just the header of a file: len(raw) < limit or len(raw) == limit.
+ Detector func(raw []byte, limit uint32) bool
+ xmlSig struct {
+ // the local name of the root tag
+ localName []byte
+ // the namespace of the XML document
+ xmlns []byte
+ }
+)
+
+// prefix creates a Detector which returns true if any of the provided signatures
+// is the prefix of the raw input.
+func prefix(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ for _, s := range sigs {
+ if bytes.HasPrefix(raw, s) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// offset creates a Detector which returns true if the provided signature can be
+// found at offset in the raw input.
+func offset(sig []byte, offset int) Detector {
+ return func(raw []byte, limit uint32) bool {
+ return len(raw) > offset && bytes.HasPrefix(raw[offset:], sig)
+ }
+}
+
+// ciPrefix is like prefix but the check is case insensitive.
+func ciPrefix(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ for _, s := range sigs {
+ if ciCheck(s, raw) {
+ return true
+ }
+ }
+ return false
+ }
+}
+func ciCheck(sig, raw []byte) bool {
+ if len(raw) < len(sig)+1 {
+ return false
+ }
+ // perform case insensitive check
+ for i, b := range sig {
+ db := raw[i]
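+		// Signatures are expected to use uppercase letters; clearing bit 0x20
+		// upper-cases the input byte so the comparison is case insensitive.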
+ if 'A' <= b && b <= 'Z' {
+ db &= 0xDF
+ }
+ if b != db {
+ return false
+ }
+ }
+
+ return true
+}
+
+// xml creates a Detector which returns true if any of the provided XML signatures
+// matches the raw input.
+func xml(sigs ...xmlSig) Detector {
+ return func(raw []byte, limit uint32) bool {
+ raw = trimLWS(raw)
+ if len(raw) == 0 {
+ return false
+ }
+ for _, s := range sigs {
+ if xmlCheck(s, raw) {
+ return true
+ }
+ }
+ return false
+ }
+}
+func xmlCheck(sig xmlSig, raw []byte) bool {
+ raw = raw[:min(len(raw), 512)]
+
+ if len(sig.localName) == 0 {
+ return bytes.Index(raw, sig.xmlns) > 0
+ }
+ if len(sig.xmlns) == 0 {
+ return bytes.Index(raw, sig.localName) > 0
+ }
+
+ localNameIndex := bytes.Index(raw, sig.localName)
+ return localNameIndex != -1 && localNameIndex < bytes.Index(raw, sig.xmlns)
+}
+
+// markup creates a Detector which returns true if any of the HTML signatures
+// matches the raw input.
+func markup(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ if bytes.HasPrefix(raw, []byte{0xEF, 0xBB, 0xBF}) {
+ // We skip the UTF-8 BOM if present to ensure we correctly
+ // process any leading whitespace. The presence of the BOM
+ // is taken into account during charset detection in charset.go.
+ raw = trimLWS(raw[3:])
+ } else {
+ raw = trimLWS(raw)
+ }
+ if len(raw) == 0 {
+ return false
+ }
+ for _, s := range sigs {
+ if markupCheck(s, raw) {
+ return true
+ }
+ }
+ return false
+ }
+}
+func markupCheck(sig, raw []byte) bool {
+ if len(raw) < len(sig)+1 {
+ return false
+ }
+
+ // perform case insensitive check
+ for i, b := range sig {
+ db := raw[i]
+ if 'A' <= b && b <= 'Z' {
+ db &= 0xDF
+ }
+ if b != db {
+ return false
+ }
+ }
+ // Next byte must be space or right angle bracket.
+ if db := raw[len(sig)]; db != ' ' && db != '>' {
+ return false
+ }
+
+ return true
+}
+
+// ftyp creates a Detector which returns true if any of the FTYP signatures
+// matches the raw input.
+func ftyp(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ if len(raw) < 12 {
+ return false
+ }
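+		// In an ISO BMFF file, bytes 4:8 spell "ftyp" and bytes 8:12 hold the
+		// major brand that the signatures are compared against.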
+ for _, s := range sigs {
+ if bytes.Equal(raw[8:12], s) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func newXMLSig(localName, xmlns string) xmlSig {
+ ret := xmlSig{xmlns: []byte(xmlns)}
+ if localName != "" {
+ ret.localName = []byte(fmt.Sprintf("<%s", localName))
+ }
+
+ return ret
+}
+
+// A valid shebang starts with the "#!" characters,
+// followed by any number of spaces,
+// followed by the path to the interpreter,
+// and, optionally, followed by the arguments for the interpreter.
+//
+// Ex:
+//
+// #! /usr/bin/env php
+//
+// /usr/bin/env is the interpreter, php is the first and only argument.
+func shebang(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ for _, s := range sigs {
+ if shebangCheck(s, firstLine(raw)) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func shebangCheck(sig, raw []byte) bool {
+ if len(raw) < len(sig)+2 {
+ return false
+ }
+ if raw[0] != '#' || raw[1] != '!' {
+ return false
+ }
+
+ return bytes.Equal(trimLWS(trimRWS(raw[2:])), sig)
+}
+
+// trimLWS trims whitespace from the beginning of the input.
+func trimLWS(in []byte) []byte {
+ firstNonWS := 0
+ for ; firstNonWS < len(in) && isWS(in[firstNonWS]); firstNonWS++ {
+ }
+
+ return in[firstNonWS:]
+}
+
+// trimRWS trims whitespace from the end of the input.
+func trimRWS(in []byte) []byte {
+ lastNonWS := len(in) - 1
+ for ; lastNonWS > 0 && isWS(in[lastNonWS]); lastNonWS-- {
+ }
+
+ return in[:lastNonWS+1]
+}
+
+func firstLine(in []byte) []byte {
+ lineEnd := 0
+ for ; lineEnd < len(in) && in[lineEnd] != '\n'; lineEnd++ {
+ }
+
+ return in[:lineEnd]
+}
+
+func isWS(b byte) bool {
+ return b == '\t' || b == '\n' || b == '\x0c' || b == '\r' || b == ' '
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+type readBuf []byte
+
+func (b *readBuf) advance(n int) bool {
+ if n < 0 || len(*b) < n {
+ return false
+ }
+ *b = (*b)[n:]
+ return true
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go
new file mode 100644
index 0000000..7d60e22
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go
@@ -0,0 +1,186 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+// Xlsx matches a Microsoft Excel 2007 file.
+func Xlsx(raw []byte, limit uint32) bool {
+ return zipContains(raw, []byte("xl/"), true)
+}
+
+// Docx matches a Microsoft Word 2007 file.
+func Docx(raw []byte, limit uint32) bool {
+ return zipContains(raw, []byte("word/"), true)
+}
+
+// Pptx matches a Microsoft PowerPoint 2007 file.
+func Pptx(raw []byte, limit uint32) bool {
+ return zipContains(raw, []byte("ppt/"), true)
+}
+
+// Ole matches an Object Linking and Embedding file.
+//
+// https://en.wikipedia.org/wiki/Object_Linking_and_Embedding
+func Ole(raw []byte, limit uint32) bool {
+ return bytes.HasPrefix(raw, []byte{0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1})
+}
+
+// Aaf matches an Advanced Authoring Format file.
+// See: https://pyaaf.readthedocs.io/en/latest/about.html
+// See: https://en.wikipedia.org/wiki/Advanced_Authoring_Format
+func Aaf(raw []byte, limit uint32) bool {
+ if len(raw) < 31 {
+ return false
+ }
+ return bytes.HasPrefix(raw[8:], []byte{0x41, 0x41, 0x46, 0x42, 0x0D, 0x00, 0x4F, 0x4D}) &&
+ (raw[30] == 0x09 || raw[30] == 0x0C)
+}
+
+// Doc matches a Microsoft Word 97-2003 file.
+// See: https://github.com/decalage2/oletools/blob/412ee36ae45e70f42123e835871bac956d958461/oletools/common/clsid.py
+func Doc(raw []byte, _ uint32) bool {
+ clsids := [][]byte{
+ // Microsoft Word 97-2003 Document (Word.Document.8)
+ {0x06, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46},
+ // Microsoft Word 6.0-7.0 Document (Word.Document.6)
+ {0x00, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46},
+ // Microsoft Word Picture (Word.Picture.8)
+ {0x07, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46},
+ }
+
+ for _, clsid := range clsids {
+ if matchOleClsid(raw, clsid) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Ppt matches a Microsoft PowerPoint 97-2003 file or a PowerPoint 95 presentation.
+func Ppt(raw []byte, limit uint32) bool {
+	// Root CLSID test is the safest way to identify an OLE file; however, the
+	// format often places the root CLSID at the end of the file.
+ if matchOleClsid(raw, []byte{
+ 0x10, 0x8d, 0x81, 0x64, 0x9b, 0x4f, 0xcf, 0x11,
+ 0x86, 0xea, 0x00, 0xaa, 0x00, 0xb9, 0x29, 0xe8,
+ }) || matchOleClsid(raw, []byte{
+ 0x70, 0xae, 0x7b, 0xea, 0x3b, 0xfb, 0xcd, 0x11,
+ 0xa9, 0x03, 0x00, 0xaa, 0x00, 0x51, 0x0e, 0xa3,
+ }) {
+ return true
+ }
+
+ lin := len(raw)
+ if lin < 520 {
+ return false
+ }
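+	// OLE compound files (v3) use 512-byte sectors; stream data begins after
+	// the header sector, so sub-header signatures are checked at offset 512.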
+ pptSubHeaders := [][]byte{
+ {0xA0, 0x46, 0x1D, 0xF0},
+ {0x00, 0x6E, 0x1E, 0xF0},
+ {0x0F, 0x00, 0xE8, 0x03},
+ }
+ for _, h := range pptSubHeaders {
+ if bytes.HasPrefix(raw[512:], h) {
+ return true
+ }
+ }
+
+ if bytes.HasPrefix(raw[512:], []byte{0xFD, 0xFF, 0xFF, 0xFF}) &&
+ raw[518] == 0x00 && raw[519] == 0x00 {
+ return true
+ }
+
+ return lin > 1152 && bytes.Contains(raw[1152:min(4096, lin)],
+ []byte("P\x00o\x00w\x00e\x00r\x00P\x00o\x00i\x00n\x00t\x00 D\x00o\x00c\x00u\x00m\x00e\x00n\x00t"))
+}
+
+// Xls matches a Microsoft Excel 97-2003 file.
+func Xls(raw []byte, limit uint32) bool {
+	// Root CLSID test is the safest way to identify an OLE file; however, the
+	// format often places the root CLSID at the end of the file.
+ if matchOleClsid(raw, []byte{
+ 0x10, 0x08, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }) || matchOleClsid(raw, []byte{
+ 0x20, 0x08, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }) {
+ return true
+ }
+
+ lin := len(raw)
+ if lin < 520 {
+ return false
+ }
+ xlsSubHeaders := [][]byte{
+ {0x09, 0x08, 0x10, 0x00, 0x00, 0x06, 0x05, 0x00},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x10},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x1F},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x22},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x23},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x28},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x29},
+ }
+ for _, h := range xlsSubHeaders {
+ if bytes.HasPrefix(raw[512:], h) {
+ return true
+ }
+ }
+
+ return lin > 1152 && bytes.Contains(raw[1152:min(4096, lin)],
+ []byte("W\x00k\x00s\x00S\x00S\x00W\x00o\x00r\x00k\x00B\x00o\x00o\x00k"))
+}
+
+// Pub matches a Microsoft Publisher file.
+func Pub(raw []byte, limit uint32) bool {
+ return matchOleClsid(raw, []byte{
+ 0x01, 0x12, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46,
+ })
+}
+
+// Msg matches a Microsoft Outlook email file.
+func Msg(raw []byte, limit uint32) bool {
+ return matchOleClsid(raw, []byte{
+ 0x0B, 0x0D, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46,
+ })
+}
+
+// Msi matches a Microsoft Windows Installer file.
+// http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
+func Msi(raw []byte, limit uint32) bool {
+ return matchOleClsid(raw, []byte{
+ 0x84, 0x10, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46,
+ })
+}
+
+// Helper to match by a specific CLSID of a compound file.
+//
+// http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
+func matchOleClsid(in []byte, clsid []byte) bool {
+ // Microsoft Compound files v3 have a sector length of 512, while v4 has 4096.
+ // Change sector offset depending on file version.
+ // https://www.loc.gov/preservation/digital/formats/fdd/fdd000392.shtml
+ sectorLength := 512
+ if len(in) < sectorLength {
+ return false
+ }
+ if in[26] == 0x04 && in[27] == 0x00 {
+ sectorLength = 4096
+ }
+
+ // SecID of first sector of the directory stream.
+ firstSecID := int(binary.LittleEndian.Uint32(in[48:52]))
+
+ // Expected offset of CLSID for root storage object.
+ clsidOffset := sectorLength*(1+firstSecID) + 80
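+	// The offset skips the header plus firstSecID sectors to reach the
+	// directory stream; the root entry's CLSID field starts 80 bytes in.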
+
+ if len(in) <= clsidOffset+16 {
+ return false
+ }
+
+ return bytes.HasPrefix(in[clsidOffset:], clsid)
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go
new file mode 100644
index 0000000..bb4cd78
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go
@@ -0,0 +1,42 @@
+package magic
+
+import (
+ "bytes"
+)
+
+/*
+ NOTE:
+
+ In May 2003, two Internet RFCs were published relating to the format.
+ The Ogg bitstream was defined in RFC 3533 (which is classified as
+ 'informative') and its Internet content type (application/ogg) in RFC
+ 3534 (which is, as of 2006, a proposed standard protocol). In
+ September 2008, RFC 3534 was obsoleted by RFC 5334, which added
+ content types video/ogg, audio/ogg and filename extensions .ogx, .ogv,
+ .oga, .spx.
+
+ See:
+ https://tools.ietf.org/html/rfc3533
+ https://developer.mozilla.org/en-US/docs/Web/HTTP/Configuring_servers_for_Ogg_media#Serve_media_with_the_correct_MIME_type
+ https://github.com/file/file/blob/master/magic/Magdir/vorbis
+*/
+
+// Ogg matches an Ogg file.
+func Ogg(raw []byte, limit uint32) bool {
+ return bytes.HasPrefix(raw, []byte("\x4F\x67\x67\x53\x00"))
+}
+
+// OggAudio matches an Ogg audio file.
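+// The first Ogg page header is typically 28 bytes long, so the codec magic of
+// the first logical bitstream is checked at offset 28.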
+func OggAudio(raw []byte, limit uint32) bool {
+ return len(raw) >= 37 && (bytes.HasPrefix(raw[28:], []byte("\x7fFLAC")) ||
+ bytes.HasPrefix(raw[28:], []byte("\x01vorbis")) ||
+ bytes.HasPrefix(raw[28:], []byte("OpusHead")) ||
+ bytes.HasPrefix(raw[28:], []byte("Speex\x20\x20\x20")))
+}
+
+// OggVideo matches an Ogg video file.
+func OggVideo(raw []byte, limit uint32) bool {
+ return len(raw) >= 37 && (bytes.HasPrefix(raw[28:], []byte("\x80theora")) ||
+ bytes.HasPrefix(raw[28:], []byte("fishead\x00")) ||
+ bytes.HasPrefix(raw[28:], []byte("\x01video\x00\x00\x00"))) // OGM video
+}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go
new file mode 100644
index 0000000..cf64463
--- /dev/null
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go
@@ -0,0 +1,381 @@
+package magic
+
+import (
+ "bytes"
+ "strings"
+ "time"
+
+ "github.com/gabriel-vasile/mimetype/internal/charset"
+ "github.com/gabriel-vasile/mimetype/internal/json"
+)
+
+var (
+ // HTML matches a Hypertext Markup Language file.
+	HTML = markup(
+		[]byte("<!DOCTYPE HTML"),
+		[]byte("<HTML"),
+		[]byte("<HEAD"),
+		[]byte("<SCRIPT"),
+		[]byte("<IFRAME"),
+		[]byte("<H1"),
+		[]byte("<DIV"),
+		[]byte("<FONT"),
+		[]byte("<TABLE"),
+		[]byte("<A"),
+		[]byte("<STYLE"),
+		[]byte("<TITLE"),
+		[]byte("<B"),
+		[]byte("<BODY"),
+		[]byte("<BR"),
+		[]byte("<P"),
+	)
+)
+
+// JSON matches a JavaScript Object Notation file.
+func JSON(raw []byte, limit uint32) bool {
+	parsed, err := json.Scan(raw)
+	// If the full file content was provided, check there is no error.
+	if len(raw) < int(limit) {
+		return err == nil
+	}
+	// If a section of the file was provided, check if all of it was parsed.
+	return parsed == len(raw) && len(raw) > 0
+}
+
+// GeoJSON matches a RFC 7946 GeoJSON file.
+//
+// GeoJSON detection implies searching for key:value pairs like: `"type": "Feature"`
+// in the input.
+// BUG(gabriel-vasile): The "type" key should be searched for in the root object.
+func GeoJSON(raw []byte, limit uint32) bool {
+ raw = trimLWS(raw)
+ if len(raw) == 0 {
+ return false
+ }
+ // GeoJSON is always a JSON object, not a JSON array or any other JSON value.
+ if raw[0] != '{' {
+ return false
+ }
+
+ s := []byte(`"type"`)
+ si, sl := bytes.Index(raw, s), len(s)
+
+ if si == -1 {
+ return false
+ }
+
+ // If the "type" string is the suffix of the input,
+ // there is no need to search for the value of the key.
+ if si+sl == len(raw) {
+ return false
+ }
+ // Skip the "type" part.
+ raw = raw[si+sl:]
+ // Skip any whitespace before the colon.
+ raw = trimLWS(raw)
+ // Check for colon.
+ if len(raw) == 0 || raw[0] != ':' {
+ return false
+ }
+ // Skip any whitespace after the colon.
+ raw = trimLWS(raw[1:])
+
+ geoJSONTypes := [][]byte{
+ []byte(`"Feature"`),
+ []byte(`"FeatureCollection"`),
+ []byte(`"Point"`),
+ []byte(`"LineString"`),
+ []byte(`"Polygon"`),
+ []byte(`"MultiPoint"`),
+ []byte(`"MultiLineString"`),
+ []byte(`"MultiPolygon"`),
+ []byte(`"GeometryCollection"`),
+ }
+ for _, t := range geoJSONTypes {
+ if bytes.HasPrefix(raw, t) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// NdJSON matches a Newline delimited JSON file. All complete lines from raw
+// must be valid JSON documents, meaning each contains one of the valid JSON
+// data types.
+func NdJSON(raw []byte, limit uint32) bool {
+ lCount, hasObjOrArr := 0, false
+ raw = dropLastLine(raw, limit)
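+	// dropLastLine is expected to drop a trailing line that may have been cut
+	// off at the read limit, since a truncated line cannot be validated.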
+ var l []byte
+ for len(raw) != 0 {
+ l, raw = scanLine(raw)
+ // Empty lines are allowed in NDJSON.
+ if l = trimRWS(trimLWS(l)); len(l) == 0 {
+ continue
+ }
+ _, err := json.Scan(l)
+ if err != nil {
+ return false
+ }
+ if l[0] == '[' || l[0] == '{' {
+ hasObjOrArr = true
+ }
+ lCount++
+ }
+
+ return lCount > 1 && hasObjOrArr
+}
+
+// HAR matches a HAR Spec file.
+// Spec: http://www.softwareishard.com/blog/har-12-spec/
+func HAR(raw []byte, limit uint32) bool {
+ s := []byte(`"log"`)
+ si, sl := bytes.Index(raw, s), len(s)
+
+ if si == -1 {
+ return false
+ }
+
+ // If the "log" string is the suffix of the input,
+ // there is no need to search for the value of the key.
+ if si+sl == len(raw) {
+ return false
+ }
+ // Skip the "log" part.
+ raw = raw[si+sl:]
+ // Skip any whitespace before the colon.
+ raw = trimLWS(raw)
+ // Check for colon.
+ if len(raw) == 0 || raw[0] != ':' {
+ return false
+ }
+ // Skip any whitespace after the colon.
+ raw = trimLWS(raw[1:])
+
+ harJSONTypes := [][]byte{
+ []byte(`"version"`),
+ []byte(`"creator"`),
+ []byte(`"entries"`),
+ }
+ for _, t := range harJSONTypes {
+ si := bytes.Index(raw, t)
+ if si > -1 {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Svg matches a SVG file.
+func Svg(raw []byte, limit uint32) bool {
+	return bytes.Contains(raw, []byte("<svg"))
+}
diff --git a/vendor/github.com/redis/go-redis/v9/options.go b/vendor/github.com/redis/go-redis/v9/options.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/options.go
+// ParseURL parses a URL into Options that can be used to connect to Redis.
+// Scheme is required.
+// There are two connection types: by tcp socket and by unix socket.
+// Tcp connection:
+//
+//	redis://<user>:<password>@<host>:<port>/<db_number>
+//
+// Unix connection:
+//
+//	unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+//
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query parameters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+//
+// Examples:
+//
+// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
+// is equivalent to:
+// &Options{
+// Network: "tcp",
+// Addr: "localhost:6789",
+// DB: 1, // path "/3" was overridden by "&db=1"
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// MaxRetries: 2,
+// }
+func ParseURL(redisURL string) (*Options, error) {
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ switch u.Scheme {
+ case "redis", "rediss":
+ return setupTCPConn(u)
+ case "unix":
+ return setupUnixConn(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+}
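+
+// A minimal usage sketch (assuming the package's standard NewClient constructor):
+//
+//	opt, err := ParseURL("redis://user:password@localhost:6379/2")
+//	if err != nil {
+//		panic(err)
+//	}
+//	rdb := NewClient(opt)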
+
+func setupTCPConn(u *url.URL) (*Options, error) {
+ o := &Options{Network: "tcp"}
+
+ o.Username, o.Password = getUserPassword(u)
+
+ h, p := getHostPortWithDefaults(u)
+ o.Addr = net.JoinHostPort(h, p)
+
+ f := strings.FieldsFunc(u.Path, func(r rune) bool {
+ return r == '/'
+ })
+ switch len(f) {
+ case 0:
+ o.DB = 0
+ case 1:
+ var err error
+ if o.DB, err = strconv.Atoi(f[0]); err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
+ }
+ default:
+ return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
+ }
+
+ if u.Scheme == "rediss" {
+ o.TLSConfig = &tls.Config{
+ ServerName: h,
+ MinVersion: tls.VersionTLS12,
+ }
+ }
+
+ return setupConnParams(u, o)
+}
+
+// getHostPortWithDefaults is a helper function that splits the url into
+// a host and a port. If the host is missing, it defaults to localhost
+// and if the port is missing, it defaults to 6379.
+func getHostPortWithDefaults(u *url.URL) (string, string) {
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ host = u.Host
+ }
+ if host == "" {
+ host = "localhost"
+ }
+ if port == "" {
+ port = "6379"
+ }
+ return host, port
+}
+
+func setupUnixConn(u *url.URL) (*Options, error) {
+ o := &Options{
+ Network: "unix",
+ }
+
+ if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
+ return nil, errors.New("redis: empty unix socket path")
+ }
+ o.Addr = u.Path
+ o.Username, o.Password = getUserPassword(u)
+ return setupConnParams(u, o)
+}
+
+type queryOptions struct {
+ q url.Values
+ err error
+}
+
+func (o *queryOptions) has(name string) bool {
+ return len(o.q[name]) > 0
+}
+
+func (o *queryOptions) string(name string) string {
+ vs := o.q[name]
+ if len(vs) == 0 {
+ return ""
+ }
+ delete(o.q, name) // enable detection of unknown parameters
+ return vs[len(vs)-1]
+}
+
+func (o *queryOptions) strings(name string) []string {
+ vs := o.q[name]
+ delete(o.q, name)
+ return vs
+}
+
+func (o *queryOptions) int(name string) int {
+ s := o.string(name)
+ if s == "" {
+ return 0
+ }
+ i, err := strconv.Atoi(s)
+ if err == nil {
+ return i
+ }
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
+ }
+ return 0
+}
+
+func (o *queryOptions) duration(name string) time.Duration {
+ s := o.string(name)
+ if s == "" {
+ return 0
+ }
+ // try plain number first
+ if i, err := strconv.Atoi(s); err == nil {
+ if i <= 0 {
+ // disable timeouts
+ return -1
+ }
+ return time.Duration(i) * time.Second
+ }
+ dur, err := time.ParseDuration(s)
+ if err == nil {
+ return dur
+ }
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
+ }
+ return 0
+}
+
+func (o *queryOptions) bool(name string) bool {
+ switch s := o.string(name); s {
+ case "true", "1":
+ return true
+ case "false", "0", "":
+ return false
+ default:
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
+ }
+ return false
+ }
+}
+
+func (o *queryOptions) remaining() []string {
+ if len(o.q) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(o.q))
+ for k := range o.q {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// setupConnParams converts query parameters in u to option values in o.
+func setupConnParams(u *url.URL, o *Options) (*Options, error) {
+ q := queryOptions{q: u.Query()}
+
+ // compat: a future major release may use q.int("db")
+ if tmp := q.string("db"); tmp != "" {
+ db, err := strconv.Atoi(tmp)
+ if err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %w", err)
+ }
+ o.DB = db
+ }
+
+ o.Protocol = q.int("protocol")
+ o.ClientName = q.string("client_name")
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxIdleConns = q.int("max_idle_conns")
+ o.MaxActiveConns = q.int("max_active_conns")
+ if q.has("conn_max_idle_time") {
+ o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
+ } else {
+ o.ConnMaxIdleTime = q.duration("idle_timeout")
+ }
+ if q.has("conn_max_lifetime") {
+ o.ConnMaxLifetime = q.duration("conn_max_lifetime")
+ } else {
+ o.ConnMaxLifetime = q.duration("max_conn_age")
+ }
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
+
+ return o, nil
+}
+
+func getUserPassword(u *url.URL) (string, string) {
+ var user, password string
+ if u.User != nil {
+ user = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ password = p
+ }
+ }
+ return user, password
+}
+
+func newConnPool(
+ opt *Options,
+ dialer func(ctx context.Context, network, addr string) (net.Conn, error),
+) *pool.ConnPool {
+ return pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ return dialer(ctx, opt.Network, opt.Addr)
+ },
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+ })
+}
diff --git a/vendor/github.com/redis/go-redis/v9/osscluster.go b/vendor/github.com/redis/go-redis/v9/osscluster.go
new file mode 100644
index 0000000..9268120
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/osscluster.go
@@ -0,0 +1,1949 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "math"
+ "net"
+ "net/url"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/rand"
+)
+
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+ // A seed list of host:port addresses of cluster nodes.
+ Addrs []string
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+ // NewClient creates a cluster node client with provided name and options.
+ NewClient func(opt *Options) *Client
+
+ // The maximum number of retries before giving up. Command is retried
+ // on network errors and MOVED/ASK redirects.
+ // Default is 3 retries.
+ MaxRedirects int
+
+ // Enables read-only commands on slave nodes.
+ ReadOnly bool
+ // Allows routing read-only commands to the closest master or slave node.
+ // It automatically enables ReadOnly.
+ RouteByLatency bool
+ // Allows routing read-only commands to the random master or slave node.
+ // It automatically enables ReadOnly.
+ RouteRandomly bool
+
+ // Optional function that returns cluster slots information.
+ // It is useful to manually create cluster of standalone Redis servers
+ // and load-balance read/write operations between master and slaves.
+ // It can use service like ZooKeeper to maintain configuration information
+ // and Cluster.ReloadState to manually trigger state reloading.
+ ClusterSlots func(context.Context) ([]ClusterSlot, error)
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Protocol int
+ Username string
+ Password string
+ CredentialsProvider func() (username string, password string)
+ CredentialsProviderContext func(ctx context.Context) (username string, password string, err error)
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
+
+ PoolFIFO bool
+ PoolSize int // applies per cluster node and not for the whole cluster
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int // applies per cluster node and not for the whole cluster
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+
+ TLSConfig *tls.Config
+ DisableIndentity bool // Disable set-lib on connect. Default is false.
+
+ IdentitySuffix string // Add suffix to client name. Default is empty.
+
+ // UnstableResp3 enables Unstable mode for Redis Search module with RESP3.
+ UnstableResp3 bool
+}
+
+func (opt *ClusterOptions) init() {
+ if opt.MaxRedirects == -1 {
+ opt.MaxRedirects = 0
+ } else if opt.MaxRedirects == 0 {
+ opt.MaxRedirects = 3
+ }
+
+ if opt.RouteByLatency || opt.RouteRandomly {
+ opt.ReadOnly = true
+ }
+
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
+ }
+
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+
+ if opt.MaxRetries == 0 {
+ opt.MaxRetries = -1
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+
+ if opt.NewClient == nil {
+ opt.NewClient = NewClient
+ }
+}
+
+// ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis.
+// The URL must be in the form:
+//
+//	redis://<user>:<password>@<host>:<port>
+//	or
+//	rediss://<user>:<password>@<host>:<port>
+//
+// To add additional addresses, specify the query parameter, "addr" one or more times. e.g:
+//
+//	redis://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+//	or
+//	rediss://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+//
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query parameters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+//
+// Example:
+//
+// redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791
+// is equivalent to:
+// &ClusterOptions{
+//		Addrs:       []string{"localhost:6789", "localhost:6790", "localhost:6791"},
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// }
+func ParseClusterURL(redisURL string) (*ClusterOptions, error) {
+ o := &ClusterOptions{}
+
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ // add base URL to the array of addresses
+ // more addresses may be added through the URL params
+ h, p := getHostPortWithDefaults(u)
+ o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+
+ // setup username, password, and other configurations
+ o, err = setupClusterConn(u, h, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
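+
+// A minimal usage sketch (assuming the package's standard NewClusterClient constructor):
+//
+//	opt, err := ParseClusterURL("redis://localhost:6789?addr=localhost:6790")
+//	if err != nil {
+//		panic(err)
+//	}
+//	rdb := NewClusterClient(opt)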
+
+// setupClusterConn gets the username and password from the URL and the query parameters.
+func setupClusterConn(u *url.URL, host string, o *ClusterOptions) (*ClusterOptions, error) {
+ switch u.Scheme {
+ case "rediss":
+ o.TLSConfig = &tls.Config{ServerName: host}
+ fallthrough
+ case "redis":
+ o.Username, o.Password = getUserPassword(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+
+ // retrieve the configuration from the query parameters
+ o, err := setupClusterQueryParams(u, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
+
+// setupClusterQueryParams converts query parameters in u to option values in o.
+func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, error) {
+ q := queryOptions{q: u.Query()}
+
+ o.Protocol = q.int("protocol")
+ o.ClientName = q.string("client_name")
+ o.MaxRedirects = q.int("max_redirects")
+ o.ReadOnly = q.bool("read_only")
+ o.RouteByLatency = q.bool("route_by_latency")
+ o.RouteRandomly = q.bool("route_randomly")
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxIdleConns = q.int("max_idle_conns")
+ o.MaxActiveConns = q.int("max_active_conns")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.ConnMaxLifetime = q.duration("conn_max_lifetime")
+ o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
+
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ // addr can be specified as many times as needed
+ addrs := q.strings("addr")
+ for _, addr := range addrs {
+ h, p, err := net.SplitHostPort(addr)
+ if err != nil || h == "" || p == "" {
+ return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr)
+ }
+
+ o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+ }
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
+
+ return o, nil
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+ return &Options{
+ ClientName: opt.ClientName,
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+ CredentialsProvider: opt.CredentialsProvider,
+ CredentialsProviderContext: opt.CredentialsProviderContext,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ TLSConfig: opt.TLSConfig,
+ // If ClusterSlots is populated, then we probably have an artificial
+ // cluster whose nodes are not in clustering mode (otherwise there isn't
+ // much use for ClusterSlots config). This means we cannot execute the
+ // READONLY command against that node -- setting readOnly to false in such
+ // situations in the options below will prevent that from happening.
+ readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
+ UnstableResp3: opt.UnstableResp3,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+ Client *Client
+
+ latency uint32 // atomic
+ generation uint32 // atomic
+ failing uint32 // atomic
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+ opt := clOpt.clientOptions()
+ opt.Addr = addr
+ node := clusterNode{
+ Client: clOpt.NewClient(opt),
+ }
+
+ node.latency = math.MaxUint32
+ if clOpt.RouteByLatency {
+ go node.updateLatency()
+ }
+
+ return &node
+}
+
+func (n *clusterNode) String() string {
+ return n.Client.String()
+}
+
+func (n *clusterNode) Close() error {
+ return n.Client.Close()
+}
+
+const maximumNodeLatency = 1 * time.Minute
+
+func (n *clusterNode) updateLatency() {
+ const numProbe = 10
+ var dur uint64
+
+ successes := 0
+ for i := 0; i < numProbe; i++ {
+ time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
+
+ start := time.Now()
+ err := n.Client.Ping(context.TODO()).Err()
+ if err == nil {
+ dur += uint64(time.Since(start) / time.Microsecond)
+ successes++
+ }
+ }
+
+ var latency float64
+ if successes == 0 {
+		// If none of the pings worked, set latency to some arbitrarily high
+		// value so this node gets the least priority.
+ latency = float64((maximumNodeLatency) / time.Microsecond)
+ } else {
+ latency = float64(dur) / float64(successes)
+ }
+ atomic.StoreUint32(&n.latency, uint32(latency+0.5))
+}
+
+func (n *clusterNode) Latency() time.Duration {
+ latency := atomic.LoadUint32(&n.latency)
+ return time.Duration(latency) * time.Microsecond
+}
+
+func (n *clusterNode) MarkAsFailing() {
+ atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
+}
+
+func (n *clusterNode) Failing() bool {
+ const timeout = 15 // 15 seconds
+
+ failing := atomic.LoadUint32(&n.failing)
+ if failing == 0 {
+ return false
+ }
+ if time.Now().Unix()-int64(failing) < timeout {
+ return true
+ }
+ atomic.StoreUint32(&n.failing, 0)
+ return false
+}
+
+func (n *clusterNode) Generation() uint32 {
+ return atomic.LoadUint32(&n.generation)
+}
+
+func (n *clusterNode) SetGeneration(gen uint32) {
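+	// Only move the generation forward; retry the CAS when another goroutine
+	// stored a newer value concurrently.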
+ for {
+ v := atomic.LoadUint32(&n.generation)
+ if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
+ break
+ }
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+ opt *ClusterOptions
+
+ mu sync.RWMutex
+ addrs []string
+ nodes map[string]*clusterNode
+ activeAddrs []string
+ closed bool
+ onNewNode []func(rdb *Client)
+
+ _generation uint32 // atomic
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+ return &clusterNodes{
+ opt: opt,
+
+ addrs: opt.Addrs,
+ nodes: make(map[string]*clusterNode),
+ }
+}
+
+func (c *clusterNodes) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, node := range c.nodes {
+ if err := node.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ c.nodes = nil
+ c.activeAddrs = nil
+
+ return firstErr
+}
+
+func (c *clusterNodes) OnNewNode(fn func(rdb *Client)) {
+ c.mu.Lock()
+ c.onNewNode = append(c.onNewNode, fn)
+ c.mu.Unlock()
+}
+
+func (c *clusterNodes) Addrs() ([]string, error) {
+ var addrs []string
+
+ c.mu.RLock()
+ closed := c.closed //nolint:ifshort
+ if !closed {
+ if len(c.activeAddrs) > 0 {
+ addrs = make([]string, len(c.activeAddrs))
+ copy(addrs, c.activeAddrs)
+ } else {
+ addrs = make([]string, len(c.addrs))
+ copy(addrs, c.addrs)
+ }
+ }
+ c.mu.RUnlock()
+
+ if closed {
+ return nil, pool.ErrClosed
+ }
+ if len(addrs) == 0 {
+ return nil, errClusterNoNodes
+ }
+ return addrs, nil
+}
+
+func (c *clusterNodes) NextGeneration() uint32 {
+ return atomic.AddUint32(&c._generation, 1)
+}
+
+// GC removes unused nodes.
+func (c *clusterNodes) GC(generation uint32) {
+ //nolint:prealloc
+ var collected []*clusterNode
+
+ c.mu.Lock()
+
+ c.activeAddrs = c.activeAddrs[:0]
+ for addr, node := range c.nodes {
+ if node.Generation() >= generation {
+ c.activeAddrs = append(c.activeAddrs, addr)
+ if c.opt.RouteByLatency {
+ go node.updateLatency()
+ }
+ continue
+ }
+
+ delete(c.nodes, addr)
+ collected = append(collected, node)
+ }
+
+ c.mu.Unlock()
+
+ for _, node := range collected {
+ _ = node.Client.Close()
+ }
+}
+
+func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
+ node, err := c.get(addr)
+ if err != nil {
+ return nil, err
+ }
+ if node != nil {
+ return node, nil
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ node, ok := c.nodes[addr]
+ if ok {
+ return node, nil
+ }
+
+ node = newClusterNode(c.opt, addr)
+ for _, fn := range c.onNewNode {
+ fn(node.Client)
+ }
+
+ c.addrs = appendIfNotExists(c.addrs, addr)
+ c.nodes[addr] = node
+
+ return node, nil
+}
+
+func (c *clusterNodes) get(addr string) (*clusterNode, error) {
+ var node *clusterNode
+ var err error
+ c.mu.RLock()
+ if c.closed {
+ err = pool.ErrClosed
+ } else {
+ node = c.nodes[addr]
+ }
+ c.mu.RUnlock()
+ return node, err
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ cp := make([]*clusterNode, 0, len(c.nodes))
+ for _, node := range c.nodes {
+ cp = append(cp, node)
+ }
+ return cp, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+ addrs, err := c.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ n := rand.Intn(len(addrs))
+ return c.GetOrCreate(addrs[n])
+}
+
+//------------------------------------------------------------------------------
+
+type clusterSlot struct {
+ start, end int
+ nodes []*clusterNode
+}
+
+type clusterSlotSlice []*clusterSlot
+
+func (p clusterSlotSlice) Len() int {
+ return len(p)
+}
+
+func (p clusterSlotSlice) Less(i, j int) bool {
+ return p[i].start < p[j].start
+}
+
+func (p clusterSlotSlice) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+type clusterState struct {
+ nodes *clusterNodes
+ Masters []*clusterNode
+ Slaves []*clusterNode
+
+ slots []*clusterSlot
+
+ generation uint32
+ createdAt time.Time
+}
+
+func newClusterState(
+ nodes *clusterNodes, slots []ClusterSlot, origin string,
+) (*clusterState, error) {
+ c := clusterState{
+ nodes: nodes,
+
+ slots: make([]*clusterSlot, 0, len(slots)),
+
+ generation: nodes.NextGeneration(),
+ createdAt: time.Now(),
+ }
+
+ originHost, _, _ := net.SplitHostPort(origin)
+ isLoopbackOrigin := isLoopback(originHost)
+
+ for _, slot := range slots {
+ var nodes []*clusterNode
+ for i, slotNode := range slot.Nodes {
+ addr := slotNode.Addr
+ if !isLoopbackOrigin {
+ addr = replaceLoopbackHost(addr, originHost)
+ }
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ node.SetGeneration(c.generation)
+ nodes = append(nodes, node)
+
+ if i == 0 {
+ c.Masters = appendUniqueNode(c.Masters, node)
+ } else {
+ c.Slaves = appendUniqueNode(c.Slaves, node)
+ }
+ }
+
+ c.slots = append(c.slots, &clusterSlot{
+ start: slot.Start,
+ end: slot.End,
+ nodes: nodes,
+ })
+ }
+
+ sort.Sort(clusterSlotSlice(c.slots))
+
+ time.AfterFunc(time.Minute, func() {
+ nodes.GC(c.generation)
+ })
+
+ return &c, nil
+}
+
+func replaceLoopbackHost(nodeAddr, originHost string) string {
+ nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
+ if err != nil {
+ return nodeAddr
+ }
+
+ nodeIP := net.ParseIP(nodeHost)
+ if nodeIP == nil {
+ return nodeAddr
+ }
+
+ if !nodeIP.IsLoopback() {
+ return nodeAddr
+ }
+
+	// Use the non-loopback origin host together with the node port.
+ return net.JoinHostPort(originHost, nodePort)
+}
+
+func isLoopback(host string) bool {
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return true
+ }
+ return ip.IsLoopback()
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) > 0 {
+ return nodes[0], nil
+ }
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ switch len(nodes) {
+ case 0:
+ return c.nodes.Random()
+ case 1:
+ return nodes[0], nil
+ case 2:
+ if slave := nodes[1]; !slave.Failing() {
+ return slave, nil
+ }
+ return nodes[0], nil
+ default:
+ var slave *clusterNode
+ for i := 0; i < 10; i++ {
+ n := rand.Intn(len(nodes)-1) + 1
+ slave = nodes[n]
+ if !slave.Failing() {
+ return slave, nil
+ }
+ }
+
+ // All slaves are loading - use master.
+ return nodes[0], nil
+ }
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+
+ var allNodesFailing = true
+ var (
+ closestNonFailingNode *clusterNode
+ closestNode *clusterNode
+ minLatency time.Duration
+ )
+
+	// Start with the maximum possible duration so any measured latency compares lower.
+ minLatency = time.Duration(math.MaxInt64)
+
+ for _, n := range nodes {
+ if closestNode == nil || n.Latency() < minLatency {
+ closestNode = n
+ minLatency = n.Latency()
+ if !n.Failing() {
+ closestNonFailingNode = n
+ allNodesFailing = false
+ }
+ }
+ }
+
+	// pick the healthy node with the lowest latency
+ if !allNodesFailing && closestNonFailingNode != nil {
+ return closestNonFailingNode, nil
+ }
+
+	// if all nodes are failing, pick the temporarily failing node with the lowest latency
+ if minLatency < maximumNodeLatency && closestNode != nil {
+ internal.Logger.Printf(context.TODO(), "redis: all nodes are marked as failed, picking the temporarily failing node with lowest latency")
+ return closestNode, nil
+ }
+
+	// If all nodes are at the maximum latency (all pings are failing), return a random node across the cluster.
+ internal.Logger.Printf(context.TODO(), "redis: pings to all nodes are failing, picking a random node across the cluster")
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+ if len(nodes) == 1 {
+ return nodes[0], nil
+ }
+ randomNodes := rand.Perm(len(nodes))
+ for _, idx := range randomNodes {
+ if node := nodes[idx]; !node.Failing() {
+ return node, nil
+ }
+ }
+ return nodes[randomNodes[0]], nil
+}
+
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+ i := sort.Search(len(c.slots), func(i int) bool {
+ return c.slots[i].end >= slot
+ })
+ if i >= len(c.slots) {
+ return nil
+ }
+ x := c.slots[i]
+ if slot >= x.start && slot <= x.end {
+ return x.nodes
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type clusterStateHolder struct {
+ load func(ctx context.Context) (*clusterState, error)
+
+ state atomic.Value
+ reloading uint32 // atomic
+}
+
+func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder {
+ return &clusterStateHolder{
+ load: fn,
+ }
+}
+
+func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) {
+ state, err := c.load(ctx)
+ if err != nil {
+ return nil, err
+ }
+ c.state.Store(state)
+ return state, nil
+}
+
+func (c *clusterStateHolder) LazyReload() {
+ if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+ return
+ }
+ go func() {
+ defer atomic.StoreUint32(&c.reloading, 0)
+
+ _, err := c.Reload(context.Background())
+ if err != nil {
+ return
+ }
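+		// Keep the reloading flag set a little longer so bursts of LazyReload
+		// calls collapse into a single reload.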
+ time.Sleep(200 * time.Millisecond)
+ }()
+}
+
+func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
+ v := c.state.Load()
+ if v == nil {
+ return c.Reload(ctx)
+ }
+
+ state := v.(*clusterState)
+ if time.Since(state.createdAt) > 10*time.Second {
+ c.LazyReload()
+ }
+ return state, nil
+}
+
+func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
+ state, err := c.Reload(ctx)
+ if err == nil {
+ return state, nil
+ }
+ return c.Get(ctx)
+}
+
+//------------------------------------------------------------------------------
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+ opt *ClusterOptions
+ nodes *clusterNodes
+ state *clusterStateHolder
+ cmdsInfoCache *cmdsInfoCache
+ cmdable
+ hooksMixin
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+ opt.init()
+
+ c := &ClusterClient{
+ opt: opt,
+ nodes: newClusterNodes(opt),
+ }
+
+ c.state = newClusterStateHolder(c.loadState)
+ c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
+ c.cmdable = c.Process
+
+ c.initHooks(hooks{
+ dial: nil,
+ process: c.process,
+ pipeline: c.processPipeline,
+ txPipeline: c.processTxPipeline,
+ })
+
+ return c
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *ClusterClient) Options() *ClusterOptions {
+ return c.opt
+}
+
+// ReloadState reloads cluster state. If available, it calls the ClusterSlots
+// func to get cluster slots information.
+func (c *ClusterClient) ReloadState(ctx context.Context) {
+ c.state.LazyReload()
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+ return c.nodes.Close()
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
+ slot := c.cmdSlot(ctx, cmd)
+ var node *clusterNode
+ var moved bool
+ var ask bool
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ // MOVED and ASK responses are not transient errors that require retry delay; they
+ // should be attempted immediately.
+ if attempt > 0 && !moved && !ask {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ if node == nil {
+ var err error
+ node, err = c.cmdNode(ctx, cmd.Name(), slot)
+ if err != nil {
+ return err
+ }
+ }
+
+ if ask {
+ ask = false
+
+ pipe := node.Client.Pipeline()
+ _ = pipe.Process(ctx, NewCmd(ctx, "asking"))
+ _ = pipe.Process(ctx, cmd)
+ _, lastErr = pipe.Exec(ctx)
+ } else {
+ lastErr = node.Client.Process(ctx, cmd)
+ }
+
+ // If there is no error - we are done.
+ if lastErr == nil {
+ return nil
+ }
+ if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload()
+ }
+ node = nil
+ continue
+ }
+
+ // If slave is loading - pick another node.
+ if c.opt.ReadOnly && isLoadingError(lastErr) {
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ var addr string
+ moved, ask, addr = isMovedError(lastErr)
+ if moved || ask {
+ c.state.LazyReload()
+
+ var err error
+ node, err = c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if shouldRetry(lastErr, cmd.readTimeout() == nil) {
+			// First, retry the same node.
+ if attempt == 0 {
+ continue
+ }
+
+			// Second, try another node.
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ return lastErr
+ }
+ return lastErr
+}
+
+func (c *ClusterClient) OnNewNode(fn func(rdb *Client)) {
+ c.nodes.OnNewNode(fn)
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
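+	// A one-slot channel plus the non-blocking sends below keep only the
+	// first error reported by the workers.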
+
+ for _, master := range state.Masters {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(master)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachSlave concurrently calls the fn on each slave node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachSlave(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, slave := range state.Slaves {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(slave)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachShard concurrently calls the fn on each known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ worker := func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }
+
+ for _, node := range state.Masters {
+ wg.Add(1)
+ go worker(node)
+ }
+ for _, node := range state.Slaves {
+ wg.Add(1)
+ go worker(node)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+ var acc PoolStats
+
+ state, _ := c.state.Get(context.TODO())
+ if state == nil {
+ return &acc
+ }
+
+ for _, node := range state.Masters {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ for _, node := range state.Slaves {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ return &acc
+}
+
+func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
+ if c.opt.ClusterSlots != nil {
+ slots, err := c.opt.ClusterSlots(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return newClusterState(c.nodes, slots, "")
+ }
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+
+ for _, idx := range rand.Perm(len(addrs)) {
+ addr := addrs[idx]
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ slots, err := node.Client.ClusterSlots(ctx).Result()
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ return newClusterState(c.nodes, slots, node.Client.opt.Addr)
+ }
+
+	/*
+	 * No node is connectable. It's possible that all nodes' IPs have changed.
+	 * Clear activeAddrs so the client can re-connect using the initial
+	 * setting of the addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]),
+	 * which might have a chance to resolve the domain names and get updated IP addresses.
+	 */
+ c.nodes.mu.Lock()
+ c.nodes.activeAddrs = nil
+ c.nodes.mu.Unlock()
+
+ return nil, firstErr
+}
+
+func (c *ClusterClient) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: pipelineExecer(c.processPipelineHook),
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ cmdsMap := newCmdsMap()
+
+ if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap.m {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+ c.processPipelineNode(ctx, node, cmds, failedCmds)
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return err
+ }
+
+ if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) {
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(ctx, cmd)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+ }
+
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(ctx, cmd)
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+}
+
+func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool {
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
+ if cmdInfo == nil || !cmdInfo.ReadOnly {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *ClusterClient) processPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) {
+ _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ cn, err := node.Client.getConn(ctx)
+ if err != nil {
+ node.MarkAsFailing()
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var processErr error
+ defer func() {
+ node.Client.releaseConn(ctx, cn, processErr)
+ }()
+ processErr = c.processPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+
+ return processErr
+ })
+}
+
+func (c *ClusterClient) processPipelineNodeConn(
+ ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ if isBadConn(err, false, node.Client.getAddr()) {
+ node.MarkAsFailing()
+ }
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
+ })
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+ ctx context.Context,
+ node *clusterNode,
+ rd *proto.Reader,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
+) error {
+ for i, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+
+ if err == nil {
+ continue
+ }
+
+ if c.checkMovedErr(ctx, cmd, err, failedCmds) {
+ continue
+ }
+
+ if c.opt.ReadOnly && isBadConn(err, false, node.Client.getAddr()) {
+ node.MarkAsFailing()
+ }
+
+ if !isRedisError(err) {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds[i+1:], err)
+ return err
+ }
+ }
+
+ if err := cmds[0].Err(); err != nil && shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ return err
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) checkMovedErr(
+ ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap,
+) bool {
+ moved, ask, addr := isMovedError(err)
+ if !moved && !ask {
+ return false
+ }
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return false
+ }
+
+ if moved {
+ c.state.LazyReload()
+ failedCmds.Add(node, cmd)
+ return true
+ }
+
+ if ask {
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+ return true
+ }
+
+ panic("not reached")
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *ClusterClient) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ // Trim multi .. exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ cmdsMap := c.mapCmdsBySlot(ctx, cmds)
+ for slot, cmds := range cmdsMap {
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ continue
+ }
+
+ cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+ c.processTxPipelineNode(ctx, node, cmds, failedCmds)
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds.m
+ }
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder {
+ cmdsMap := make(map[int][]Cmder)
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(ctx, cmd)
+ cmdsMap[slot] = append(cmdsMap[slot], cmd)
+ }
+ return cmdsMap
+}
+
+func (c *ClusterClient) processTxPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) {
+ cmds = wrapMultiExec(ctx, cmds)
+ _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ cn, err := node.Client.getConn(ctx)
+ if err != nil {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var processErr error
+ defer func() {
+ node.Client.releaseConn(ctx, cn, processErr)
+ }()
+ processErr = c.processTxPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+
+ return processErr
+ })
+}
+
+func (c *ClusterClient) processTxPipelineNodeConn(
+ ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ trimmedCmds := cmds[1 : len(cmds)-1]
+
+ if err := c.txPipelineReadQueued(
+ ctx, rd, statusCmd, trimmedCmds, failedCmds,
+ ); err != nil {
+ setCmdsErr(cmds, err)
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds)
+ }
+
+ return err
+ }
+
+ return pipelineReadCmds(rd, trimmedCmds)
+ })
+}
+
+func (c *ClusterClient) txPipelineReadQueued(
+ ctx context.Context,
+ rd *proto.Reader,
+ statusCmd *StatusCmd,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
+) error {
+ // Parse queued replies.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ for _, cmd := range cmds {
+ err := statusCmd.readReply(rd)
+ if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) {
+ continue
+ }
+ return err
+ }
+
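+ // EXEC replies with an array containing the result of each queued
+ // command. A Nil reply means the transaction was aborted (for example,
+ // a WATCHed key changed), which is surfaced as TxFailedErr.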
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ if line[0] != proto.RespArray {
+ return fmt.Errorf("redis: expected '*', but got line %q", line)
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) cmdsMoved(
+ ctx context.Context, cmds []Cmder,
+ moved, ask bool,
+ addr string,
+ failedCmds *cmdsMap,
+) error {
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+
+ if moved {
+ c.state.LazyReload()
+ for _, cmd := range cmds {
+ failedCmds.Add(node, cmd)
+ }
+ return nil
+ }
+
+ if ask {
+ for _, cmd := range cmds {
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+ }
+ return nil
+ }
+
+ return nil
+}
+
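+// Watch runs fn inside a transaction against the master node that owns
+// the keys' slot, retrying on redirects up to MaxRedirects times. All
+// keys must hash to the same slot; hash tags (e.g. "{user:1}:name" and
+// "{user:1}:age") can be used to guarantee this.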
+func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ slot := hashtag.Slot(keys[0])
+ for _, key := range keys[1:] {
+ if hashtag.Slot(key) != slot {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
+ return err
+ }
+ }
+
+ node, err := c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ err = node.Client.Watch(ctx, fn, keys...)
+ if err == nil {
+ break
+ }
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ node, err = c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload()
+ }
+ node, err = c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if shouldRetry(err, true) {
+ continue
+ }
+
+ return err
+ }
+
+ return err
+}
+
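+// pubSub creates a PubSub pinned to a single cluster node: newConn picks
+// the master for the first channel's slot (or a random node when no
+// channels are given), and closeConn releases the pinned node.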
+func (c *ClusterClient) pubSub() *PubSub {
+ var node *clusterNode
+ pubsub := &PubSub{
+ opt: c.opt.clientOptions(),
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ if node != nil {
+ panic("node != nil")
+ }
+
+ var err error
+ if len(channels) > 0 {
+ slot := hashtag.Slot(channels[0])
+ node, err = c.slotMasterNode(ctx, slot)
+ } else {
+ node, err = c.nodes.Random()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ cn, err := node.Client.newConn(context.TODO())
+ if err != nil {
+ node = nil
+
+ return nil, err
+ }
+
+ return cn, nil
+ },
+ closeConn: func(cn *pool.Conn) error {
+ err := node.Client.connPool.CloseConn(cn)
+ node = nil
+ return err
+ },
+ }
+ pubsub.init()
+
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// SSubscribe subscribes the client to the specified shard channels.
+func (c *ClusterClient) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.SSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ // Try 3 random nodes.
+ const nodeLimit = 3
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+
+ perm := rand.Perm(len(addrs))
+ if len(perm) > nodeLimit {
+ perm = perm[:nodeLimit]
+ }
+
+ for _, idx := range perm {
+ addr := addrs[idx]
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ info, err := node.Client.Command(ctx).Result()
+ if err == nil {
+ return info, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ if firstErr == nil {
+ panic("not reached")
+ }
+ return nil, firstErr
+}
+
+func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
+ if err != nil {
+ internal.Logger.Printf(context.TODO(), "getting command info: %s", err)
+ return nil
+ }
+
+ info := cmdsInfo[name]
+ if info == nil {
+ internal.Logger.Printf(context.TODO(), "info for cmd=%s not found", name)
+ }
+ return info
+}
+
+func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int {
+ args := cmd.Args()
+ if args[0] == "cluster" && args[1] == "getkeysinslot" {
+ return args[2].(int)
+ }
+
+ return cmdSlot(cmd, cmdFirstKeyPos(cmd))
+}
+
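+// cmdSlot computes the Redis Cluster hash slot for the command's first
+// key: CRC16 of the key (or of its {hash tag}, when present) modulo
+// 16384. Commands with no key position get a random slot.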
+func cmdSlot(cmd Cmder, pos int) int {
+ if pos == 0 {
+ return hashtag.RandomSlot()
+ }
+ firstKey := cmd.stringArg(pos)
+ return hashtag.Slot(firstKey)
+}
+
+func (c *ClusterClient) cmdNode(
+ ctx context.Context,
+ cmdName string,
+ slot int,
+) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if c.opt.ReadOnly {
+ cmdInfo := c.cmdInfo(ctx, cmdName)
+ if cmdInfo != nil && cmdInfo.ReadOnly {
+ return c.slotReadOnlyNode(state, slot)
+ }
+ }
+ return state.slotMasterNode(slot)
+}
+
+func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+ if c.opt.RouteByLatency {
+ return state.slotClosestNode(slot)
+ }
+ if c.opt.RouteRandomly {
+ return state.slotRandomNode(slot)
+ }
+ return state.slotSlaveNode(slot)
+}
+
+func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return state.slotMasterNode(slot)
+}
+
+// SlaveForKey gets a client for a replica node to run any command on it.
+// This is especially useful for running a Lua script that contains only
+// read-only commands on a replica. Regular Redis commands carry a flag
+// marking them as read-only and are automatically routed to replica nodes
+// when ClusterOptions.ReadOnly is set to true; scripts are not flagged,
+// hence this helper.
+func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ slot := hashtag.Slot(key)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return nil, err
+ }
+ return node.Client, err
+}
+
+// MasterForKey returns a client to the master node for a particular key.
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
+ slot := hashtag.Slot(key)
+ node, err := c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return nil, err
+ }
+ return node.Client, err
+}
+
+func (c *ClusterClient) context(ctx context.Context) context.Context {
+ if c.opt.ContextTimeoutEnabled {
+ return ctx
+ }
+ return context.Background()
+}
+
+func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
+ for _, n := range nodes {
+ if n == node {
+ return nodes
+ }
+ }
+ return append(nodes, node)
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+ for _, e := range es {
+ for _, s := range ss {
+ if s == e {
+ continue loop
+ }
+ }
+ ss = append(ss, e)
+ }
+ return ss
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsMap struct {
+ mu sync.Mutex
+ m map[*clusterNode][]Cmder
+}
+
+func newCmdsMap() *cmdsMap {
+ return &cmdsMap{
+ m: make(map[*clusterNode][]Cmder),
+ }
+}
+
+func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
+ m.mu.Lock()
+ m.m[node] = append(m.m[node], cmds...)
+ m.mu.Unlock()
+}
diff --git a/vendor/github.com/redis/go-redis/v9/osscluster_commands.go b/vendor/github.com/redis/go-redis/v9/osscluster_commands.go
new file mode 100644
index 0000000..b13f8e7
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/osscluster_commands.go
@@ -0,0 +1,109 @@
+package redis
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+)
+
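+// DBSize returns the total number of keys in the cluster by summing
+// DBSIZE across all master nodes.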
+func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var size int64
+ err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+ n, err := master.DBSize(ctx).Result()
+ if err != nil {
+ return err
+ }
+ atomic.AddInt64(&size, n)
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ } else {
+ cmd.val = size
+ }
+ return nil
+ })
+ return cmd
+}
+
+func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var mu sync.Mutex
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ val, err := shard.ScriptLoad(ctx, script).Result()
+ if err != nil {
+ return err
+ }
+
+ mu.Lock()
+ if cmd.Val() == "" {
+ cmd.val = val
+ }
+ mu.Unlock()
+
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ }
+ return nil
+ })
+ return cmd
+}
+
+func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ return shard.ScriptFlush(ctx).Err()
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ }
+ return nil
+ })
+ return cmd
+}
+
+func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+
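+ // result starts all-true and is ANDed with every shard's reply below,
+ // so a hash is reported as existing only if it is cached on every shard.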
+ result := make([]bool, len(hashes))
+ for i := range result {
+ result[i] = true
+ }
+
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var mu sync.Mutex
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ val, err := shard.ScriptExists(ctx, hashes...).Result()
+ if err != nil {
+ return err
+ }
+
+ mu.Lock()
+ for i, v := range val {
+ result[i] = result[i] && v
+ }
+ mu.Unlock()
+
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ } else {
+ cmd.val = result
+ }
+ return nil
+ })
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/pipeline.go b/vendor/github.com/redis/go-redis/v9/pipeline.go
new file mode 100644
index 0000000..1c11420
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/pipeline.go
@@ -0,0 +1,121 @@
+package redis
+
+import (
+ "context"
+ "errors"
+)
+
+type pipelineExecer func(context.Context, []Cmder) error
+
+// Pipeliner is a mechanism to realise the Redis Pipeline technique.
+//
+// Pipelining is a technique to greatly speed up processing by packing
+// operations into batches, sending them at once to Redis, and reading all
+// the replies in a single step.
+// See https://redis.io/topics/pipelining
+//
+// Note that a Pipeline is not a transaction, so you can get unexpected
+// results with big pipelines and small read/write timeouts: the client
+// retries on timeouts, so a pipeline can be retransmitted and commands
+// executed more than once.
+// To avoid this, use read/write timeouts appropriate to your batch size,
+// and/or use TxPipeline.
+type Pipeliner interface {
+ StatefulCmdable
+
+ // Len returns the number of commands in the pipeline that have not yet been executed.
+ Len() int
+
+ // Do is an API for executing any command.
+ // If a certain Redis command is not yet supported, you can use Do to execute it.
+ Do(ctx context.Context, args ...interface{}) *Cmd
+
+ // Process appends the command to the pipeline buffer for later execution.
+ Process(ctx context.Context, cmd Cmder) error
+
+ // Discard drops all commands that have been queued but not yet executed.
+ Discard()
+
+ // Exec sends all the commands buffered in the pipeline to the Redis server.
+ Exec(ctx context.Context) ([]Cmder, error)
+}
+
+var _ Pipeliner = (*Pipeline)(nil)
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining.
+// Please note: it is not safe for concurrent use by multiple goroutines.
+type Pipeline struct {
+ cmdable
+ statefulCmdable
+
+ exec pipelineExecer
+ cmds []Cmder
+}
+
+func (c *Pipeline) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+}
+
+// Len returns the number of queued commands.
+func (c *Pipeline) Len() int {
+ return len(c.cmds)
+}
+
+// Do queues the custom command for later execution.
+func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ if len(args) == 0 {
+ cmd.SetErr(errors.New("redis: please enter the command to be executed"))
+ return cmd
+ }
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Process queues the cmd for later execution.
+func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
+ c.cmds = append(c.cmds, cmd)
+ return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() {
+ c.cmds = c.cmds[:0]
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands and the error of the first
+// failed command, if any.
+func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
+ if len(c.cmds) == 0 {
+ return nil, nil
+ }
+
+ cmds := c.cmds
+ c.cmds = nil
+
+ return cmds, c.exec(ctx, cmds)
+}
+
+func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ if err := fn(c); err != nil {
+ return nil, err
+ }
+ return c.Exec(ctx)
+}
+
+func (c *Pipeline) Pipeline() Pipeliner {
+ return c
+}
+
+func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipelined(ctx, fn)
+}
+
+func (c *Pipeline) TxPipeline() Pipeliner {
+ return c
+}
diff --git a/vendor/github.com/redis/go-redis/v9/probabilistic.go b/vendor/github.com/redis/go-redis/v9/probabilistic.go
new file mode 100644
index 0000000..5d5cd1a
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/probabilistic.go
@@ -0,0 +1,1429 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+type ProbabilisticCmdable interface {
+ BFAdd(ctx context.Context, key string, element interface{}) *BoolCmd
+ BFCard(ctx context.Context, key string) *IntCmd
+ BFExists(ctx context.Context, key string, element interface{}) *BoolCmd
+ BFInfo(ctx context.Context, key string) *BFInfoCmd
+ BFInfoArg(ctx context.Context, key, option string) *BFInfoCmd
+ BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd
+ BFInfoSize(ctx context.Context, key string) *BFInfoCmd
+ BFInfoFilters(ctx context.Context, key string) *BFInfoCmd
+ BFInfoItems(ctx context.Context, key string) *BFInfoCmd
+ BFInfoExpansion(ctx context.Context, key string) *BFInfoCmd
+ BFInsert(ctx context.Context, key string, options *BFInsertOptions, elements ...interface{}) *BoolSliceCmd
+ BFMAdd(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ BFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ BFReserve(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd
+ BFReserveExpansion(ctx context.Context, key string, errorRate float64, capacity, expansion int64) *StatusCmd
+ BFReserveNonScaling(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd
+ BFReserveWithArgs(ctx context.Context, key string, options *BFReserveOptions) *StatusCmd
+ BFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd
+ BFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd
+
+ CFAdd(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFAddNX(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFCount(ctx context.Context, key string, element interface{}) *IntCmd
+ CFDel(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFExists(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFInfo(ctx context.Context, key string) *CFInfoCmd
+ CFInsert(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *BoolSliceCmd
+ CFInsertNX(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *IntSliceCmd
+ CFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ CFReserve(ctx context.Context, key string, capacity int64) *StatusCmd
+ CFReserveWithArgs(ctx context.Context, key string, options *CFReserveOptions) *StatusCmd
+ CFReserveExpansion(ctx context.Context, key string, capacity int64, expansion int64) *StatusCmd
+ CFReserveBucketSize(ctx context.Context, key string, capacity int64, bucketsize int64) *StatusCmd
+ CFReserveMaxIterations(ctx context.Context, key string, capacity int64, maxiterations int64) *StatusCmd
+ CFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd
+ CFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd
+
+ CMSIncrBy(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+ CMSInfo(ctx context.Context, key string) *CMSInfoCmd
+ CMSInitByDim(ctx context.Context, key string, width, depth int64) *StatusCmd
+ CMSInitByProb(ctx context.Context, key string, errorRate, probability float64) *StatusCmd
+ CMSMerge(ctx context.Context, destKey string, sourceKeys ...string) *StatusCmd
+ CMSMergeWithWeight(ctx context.Context, destKey string, sourceKeys map[string]int64) *StatusCmd
+ CMSQuery(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+
+ TopKAdd(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd
+ TopKCount(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+ TopKIncrBy(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd
+ TopKInfo(ctx context.Context, key string) *TopKInfoCmd
+ TopKList(ctx context.Context, key string) *StringSliceCmd
+ TopKListWithCount(ctx context.Context, key string) *MapStringIntCmd
+ TopKQuery(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ TopKReserve(ctx context.Context, key string, k int64) *StatusCmd
+ TopKReserveWithOptions(ctx context.Context, key string, k int64, width, depth int64, decay float64) *StatusCmd
+
+ TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd
+ TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd
+ TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd
+ TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd
+ TDigestCreate(ctx context.Context, key string) *StatusCmd
+ TDigestCreateWithCompression(ctx context.Context, key string, compression int64) *StatusCmd
+ TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd
+ TDigestMax(ctx context.Context, key string) *FloatCmd
+ TDigestMin(ctx context.Context, key string) *FloatCmd
+ TDigestMerge(ctx context.Context, destKey string, options *TDigestMergeOptions, sourceKeys ...string) *StatusCmd
+ TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd
+ TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd
+ TDigestReset(ctx context.Context, key string) *StatusCmd
+ TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd
+ TDigestTrimmedMean(ctx context.Context, key string, lowCutQuantile, highCutQuantile float64) *FloatCmd
+}
+
+type BFInsertOptions struct {
+ Capacity int64
+ Error float64
+ Expansion int64
+ NonScaling bool
+ NoCreate bool
+}
+
+type BFReserveOptions struct {
+ Capacity int64
+ Error float64
+ Expansion int64
+ NonScaling bool
+}
+
+type CFReserveOptions struct {
+ Capacity int64
+ BucketSize int64
+ MaxIterations int64
+ Expansion int64
+}
+
+type CFInsertOptions struct {
+ Capacity int64
+ NoCreate bool
+}
+
+// -------------------------------------------
+// Bloom filter commands
+//-------------------------------------------
+
+// BFReserve creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserve(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveExpansion creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying an expansion rate for the filter.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveExpansion(ctx context.Context, key string, errorRate float64, capacity, expansion int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity, "EXPANSION", expansion}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveNonScaling creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying that the filter should not scale.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveNonScaling(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity, "NONSCALING"}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveWithArgs creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying additional options such as expansion rate and non-scaling behavior.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveWithArgs(ctx context.Context, key string, options *BFReserveOptions) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key}
+ if options != nil {
+ args = append(args, options.Error, options.Capacity)
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ if options.NonScaling {
+ args = append(args, "NONSCALING")
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFAdd adds an item to a Bloom filter.
+// For more information - https://redis.io/commands/bf.add/
+func (c cmdable) BFAdd(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"BF.ADD", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFCard returns the cardinality of a Bloom filter -
+// number of items that were added to a Bloom filter and detected as unique
+// (items that caused at least one bit to be set in at least one sub-filter).
+// For more information - https://redis.io/commands/bf.card/
+func (c cmdable) BFCard(ctx context.Context, key string) *IntCmd {
+ args := []interface{}{"BF.CARD", key}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFExists determines whether a given item was added to a Bloom filter.
+// For more information - https://redis.io/commands/bf.exists/
+func (c cmdable) BFExists(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"BF.EXISTS", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFLoadChunk restores a Bloom filter previously saved using BF.SCANDUMP.
+// For more information - https://redis.io/commands/bf.loadchunk/
+func (c cmdable) BFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd {
+ args := []interface{}{"BF.LOADCHUNK", key, iterator, data}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFScanDump begins an incremental save of the Bloom filter.
+// This command is useful for large Bloom filters that cannot fit into the DUMP and RESTORE model.
+// For more information - https://redis.io/commands/bf.scandump/
+func (c cmdable) BFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd {
+ args := []interface{}{"BF.SCANDUMP", key, iterator}
+ cmd := newScanDumpCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
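+
+// A hedged sketch of the SCANDUMP/LOADCHUNK round trip (assumes an
+// initialized client rdb; error handling elided). Iteration starts at 0
+// and the dump is complete when the returned iterator is 0:
+//
+//	iter := int64(0)
+//	for {
+//		sd, _ := rdb.BFScanDump(ctx, "bf:src", iter).Result()
+//		if sd.Iter == 0 {
+//			break // dump complete
+//		}
+//		_ = rdb.BFLoadChunk(ctx, "bf:dst", sd.Iter, sd.Data).Err()
+//		iter = sd.Iter
+//	}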
+
+type ScanDump struct {
+ Iter int64
+ Data string
+}
+
+type ScanDumpCmd struct {
+ baseCmd
+
+ val ScanDump
+}
+
+func newScanDumpCmd(ctx context.Context, args ...interface{}) *ScanDumpCmd {
+ return &ScanDumpCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ScanDumpCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ScanDumpCmd) SetVal(val ScanDump) {
+ cmd.val = val
+}
+
+func (cmd *ScanDumpCmd) Result() (ScanDump, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ScanDumpCmd) Val() ScanDump {
+ return cmd.val
+}
+
+func (cmd *ScanDumpCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = ScanDump{}
+ for i := 0; i < n; i++ {
+ iter, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ data, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val.Data = data
+ cmd.val.Iter = iter
+ }
+
+ return nil
+}
+
+// BFInfo returns information about a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfo(ctx context.Context, key string) *BFInfoCmd {
+ args := []interface{}{"BF.INFO", key}
+ cmd := NewBFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BFInfo struct {
+ Capacity int64
+ Size int64
+ Filters int64
+ ItemsInserted int64
+ ExpansionRate int64
+}
+
+type BFInfoCmd struct {
+ baseCmd
+
+ val BFInfo
+}
+
+func NewBFInfoCmd(ctx context.Context, args ...interface{}) *BFInfoCmd {
+ return &BFInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BFInfoCmd) SetVal(val BFInfo) {
+ cmd.val = val
+}
+
+func (cmd *BFInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BFInfoCmd) Val() BFInfo {
+ return cmd.val
+}
+
+func (cmd *BFInfoCmd) Result() (BFInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BFInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result BFInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Capacity":
+ result.Capacity, err = rd.ReadInt()
+ case "Size":
+ result.Size, err = rd.ReadInt()
+ case "Number of filters":
+ result.Filters, err = rd.ReadInt()
+ case "Number of items inserted":
+ result.ItemsInserted, err = rd.ReadInt()
+ case "Expansion rate":
+ result.ExpansionRate, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: BLOOM.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// BFInfoCapacity returns information about the capacity of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "CAPACITY")
+}
+
+// BFInfoSize returns information about the size of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoSize(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "SIZE")
+}
+
+// BFInfoFilters returns information about the filters of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoFilters(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "FILTERS")
+}
+
+// BFInfoItems returns information about the items of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoItems(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "ITEMS")
+}
+
+// BFInfoExpansion returns information about the expansion rate of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoExpansion(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "EXPANSION")
+}
+
+// BFInfoArg returns information about a specific option of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoArg(ctx context.Context, key, option string) *BFInfoCmd {
+ args := []interface{}{"BF.INFO", key, option}
+ cmd := NewBFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFInsert inserts elements into a Bloom filter.
+// This function also allows for specifying additional options such as:
+// capacity, error rate, expansion rate, and non-scaling behavior.
+// For more information - https://redis.io/commands/bf.insert/
+func (c cmdable) BFInsert(ctx context.Context, key string, options *BFInsertOptions, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.INSERT", key}
+ if options != nil {
+ if options.Capacity != 0 {
+ args = append(args, "CAPACITY", options.Capacity)
+ }
+ if options.Error != 0 {
+ args = append(args, "ERROR", options.Error)
+ }
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ if options.NoCreate {
+ args = append(args, "NOCREATE")
+ }
+ if options.NonScaling {
+ args = append(args, "NONSCALING")
+ }
+ }
+ args = append(args, "ITEMS")
+ args = append(args, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
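+
+// A hedged BFInsert usage sketch (assumes an initialized client rdb):
+//
+//	opts := &redis.BFInsertOptions{Capacity: 1000, Error: 0.001}
+//	added, err := rdb.BFInsert(ctx, "bf:key", opts, "a", "b", "c").Result()
+//	// added[i] reports whether element i was newly added to the filter.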
+
+// BFMAdd adds multiple elements to a Bloom filter.
+// Returns an array of booleans indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/bf.madd/
+func (c cmdable) BFMAdd(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.MADD", key}
+ args = append(args, elements...)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFMExists checks whether multiple elements exist in a Bloom filter.
+// Returns an array of booleans indicating whether each element exists in the filter or not.
+// For more information - https://redis.io/commands/bf.mexists/
+func (c cmdable) BFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.MEXISTS", key}
+ args = append(args, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// Cuckoo filter commands
+//-------------------------------------------
+
+// CFReserve creates an empty Cuckoo filter with the specified capacity.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserve(ctx context.Context, key string, capacity int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveExpansion creates an empty Cuckoo filter with the specified capacity and expansion rate.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveExpansion(ctx context.Context, key string, capacity int64, expansion int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "EXPANSION", expansion}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveBucketSize creates an empty Cuckoo filter with the specified capacity and bucket size.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveBucketSize(ctx context.Context, key string, capacity int64, bucketsize int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "BUCKETSIZE", bucketsize}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveMaxIterations creates an empty Cuckoo filter with the specified capacity and maximum number of iterations.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveMaxIterations(ctx context.Context, key string, capacity int64, maxiterations int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "MAXITERATIONS", maxiterations}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveWithArgs creates an empty Cuckoo filter with the specified options.
+// This function allows for specifying additional options such as bucket size and maximum number of iterations.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveWithArgs(ctx context.Context, key string, options *CFReserveOptions) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, options.Capacity}
+ if options.BucketSize != 0 {
+ args = append(args, "BUCKETSIZE", options.BucketSize)
+ }
+ if options.MaxIterations != 0 {
+ args = append(args, "MAXITERATIONS", options.MaxIterations)
+ }
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFAdd adds an element to a Cuckoo filter.
+// Returns true if the element was added to the filter or false if it already exists in the filter.
+// For more information - https://redis.io/commands/cf.add/
+func (c cmdable) CFAdd(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.ADD", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFAddNX adds an element to a Cuckoo filter only if it does not already exist in the filter.
+// Returns true if the element was added to the filter or false if it already exists in the filter.
+// For more information - https://redis.io/commands/cf.addnx/
+func (c cmdable) CFAddNX(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.ADDNX", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFCount returns an estimate of the number of times an element may be in a Cuckoo Filter.
+// For more information - https://redis.io/commands/cf.count/
+func (c cmdable) CFCount(ctx context.Context, key string, element interface{}) *IntCmd {
+ args := []interface{}{"CF.COUNT", key, element}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFDel deletes an item once from the cuckoo filter.
+// For more information - https://redis.io/commands/cf.del/
+func (c cmdable) CFDel(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.DEL", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFExists determines whether an item may exist in the Cuckoo Filter or not.
+// For more information - https://redis.io/commands/cf.exists/
+func (c cmdable) CFExists(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.EXISTS", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFLoadChunk restores a filter previously saved using SCANDUMP.
+// For more information - https://redis.io/commands/cf.loadchunk/
+func (c cmdable) CFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd {
+ args := []interface{}{"CF.LOADCHUNK", key, iterator, data}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFScanDump begins an incremental save of the cuckoo filter.
+// For more information - https://redis.io/commands/cf.scandump/
+func (c cmdable) CFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd {
+ args := []interface{}{"CF.SCANDUMP", key, iterator}
+ cmd := newScanDumpCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type CFInfo struct {
+ Size int64
+ NumBuckets int64
+ NumFilters int64
+ NumItemsInserted int64
+ NumItemsDeleted int64
+ BucketSize int64
+ ExpansionRate int64
+ MaxIteration int64
+}
+
+type CFInfoCmd struct {
+ baseCmd
+
+ val CFInfo
+}
+
+func NewCFInfoCmd(ctx context.Context, args ...interface{}) *CFInfoCmd {
+ return &CFInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CFInfoCmd) SetVal(val CFInfo) {
+ cmd.val = val
+}
+
+func (cmd *CFInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CFInfoCmd) Val() CFInfo {
+ return cmd.val
+}
+
+func (cmd *CFInfoCmd) Result() (CFInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CFInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result CFInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Size":
+ result.Size, err = rd.ReadInt()
+ case "Number of buckets":
+ result.NumBuckets, err = rd.ReadInt()
+ case "Number of filters":
+ result.NumFilters, err = rd.ReadInt()
+ case "Number of items inserted":
+ result.NumItemsInserted, err = rd.ReadInt()
+ case "Number of items deleted":
+ result.NumItemsDeleted, err = rd.ReadInt()
+ case "Bucket size":
+ result.BucketSize, err = rd.ReadInt()
+ case "Expansion rate":
+ result.ExpansionRate, err = rd.ReadInt()
+ case "Max iterations":
+ result.MaxIteration, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: CF.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// CFInfo returns information about a Cuckoo filter.
+// For more information - https://redis.io/commands/cf.info/
+func (c cmdable) CFInfo(ctx context.Context, key string) *CFInfoCmd {
+ args := []interface{}{"CF.INFO", key}
+ cmd := NewCFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFInsert inserts elements into a Cuckoo filter.
+// This function also allows for specifying additional options such as capacity and the NOCREATE flag.
+// Returns an array of booleans indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/cf.insert/
+func (c cmdable) CFInsert(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"CF.INSERT", key}
+ args = c.getCfInsertWithArgs(args, options, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFInsertNX inserts elements into a Cuckoo filter only if they do not already exist in the filter.
+// This function also allows for specifying additional options such as
+// capacity and the NOCREATE flag.
+// Returns an array of integers indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/cf.insertnx/
+func (c cmdable) CFInsertNX(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *IntSliceCmd {
+ args := []interface{}{"CF.INSERTNX", key}
+ args = c.getCfInsertWithArgs(args, options, elements...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) getCfInsertWithArgs(args []interface{}, options *CFInsertOptions, elements ...interface{}) []interface{} {
+ if options != nil {
+ if options.Capacity != 0 {
+ args = append(args, "CAPACITY", options.Capacity)
+ }
+ if options.NoCreate {
+ args = append(args, "NOCREATE")
+ }
+ }
+ args = append(args, "ITEMS")
+ args = append(args, elements...)
+
+ return args
+}
+
+// CFMExists checks whether multiple elements exist in a Cuckoo filter.
+// Returns an array of booleans indicating whether each element exists in the filter or not.
+// For more information - https://redis.io/commands/cf.mexists/
+func (c cmdable) CFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"CF.MEXISTS", key}
+ args = append(args, elements...)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// CMS commands
+//-------------------------------------------
+
+// CMSIncrBy increments the count of one or more items in a Count-Min Sketch filter.
+// Returns an array of integers representing the updated count of each item.
+// For more information - https://redis.io/commands/cms.incrby/
+func (c cmdable) CMSIncrBy(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "CMS.INCRBY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type CMSInfo struct {
+ Width int64
+ Depth int64
+ Count int64
+}
+
+type CMSInfoCmd struct {
+ baseCmd
+
+ val CMSInfo
+}
+
+func NewCMSInfoCmd(ctx context.Context, args ...interface{}) *CMSInfoCmd {
+ return &CMSInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CMSInfoCmd) SetVal(val CMSInfo) {
+ cmd.val = val
+}
+
+func (cmd *CMSInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CMSInfoCmd) Val() CMSInfo {
+ return cmd.val
+}
+
+func (cmd *CMSInfoCmd) Result() (CMSInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CMSInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result CMSInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "width":
+ result.Width, err = rd.ReadInt()
+ case "depth":
+ result.Depth, err = rd.ReadInt()
+ case "count":
+ result.Count, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: CMS.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// CMSInfo returns information about a Count-Min Sketch filter.
+// For more information - https://redis.io/commands/cms.info/
+func (c cmdable) CMSInfo(ctx context.Context, key string) *CMSInfoCmd {
+ args := []interface{}{"CMS.INFO", key}
+ cmd := NewCMSInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSInitByDim creates an empty Count-Min Sketch filter with the specified dimensions.
+// For more information - https://redis.io/commands/cms.initbydim/
+func (c cmdable) CMSInitByDim(ctx context.Context, key string, width, depth int64) *StatusCmd {
+ args := []interface{}{"CMS.INITBYDIM", key, width, depth}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSInitByProb creates an empty Count-Min Sketch filter with the specified error rate and probability.
+// For more information - https://redis.io/commands/cms.initbyprob/
+func (c cmdable) CMSInitByProb(ctx context.Context, key string, errorRate, probability float64) *StatusCmd {
+ args := []interface{}{"CMS.INITBYPROB", key, errorRate, probability}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSMerge merges multiple Count-Min Sketch filters into a single filter.
+// The destination sketch must already be initialized, and all sketches
+// involved must have identical width and depth.
+// Returns OK on success or an error if the filters could not be merged.
+// For more information - https://redis.io/commands/cms.merge/
+func (c cmdable) CMSMerge(ctx context.Context, destKey string, sourceKeys ...string) *StatusCmd {
+ args := []interface{}{"CMS.MERGE", destKey, len(sourceKeys)}
+ for _, s := range sourceKeys {
+ args = append(args, s)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSMergeWithWeight merges multiple Count-Min Sketch filters into a single filter with weights for each source filter.
+// The destination sketch must already be initialized, and all sketches
+// involved must have identical width and depth.
+// Returns OK on success or an error if the filters could not be merged.
+// For more information - https://redis.io/commands/cms.merge/
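+// For example, sourceKeys map[string]int64{"a": 1, "b": 2} produces a
+// command of the form (map iteration order is not deterministic):
+//
+//	CMS.MERGE dest 2 a b WEIGHTS 1 2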
+func (c cmdable) CMSMergeWithWeight(ctx context.Context, destKey string, sourceKeys map[string]int64) *StatusCmd {
+ args := make([]interface{}, 0, 4+(len(sourceKeys)*2+1))
+ args = append(args, "CMS.MERGE", destKey, len(sourceKeys))
+
+ if len(sourceKeys) > 0 {
+ sk := make([]interface{}, len(sourceKeys))
+ sw := make([]interface{}, len(sourceKeys))
+
+ i := 0
+ for k, w := range sourceKeys {
+ sk[i] = k
+ sw[i] = w
+ i++
+ }
+
+ args = append(args, sk...)
+ args = append(args, "WEIGHTS")
+ args = append(args, sw...)
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSQuery returns count for item(s).
+// For more information - https://redis.io/commands/cms.query/
+func (c cmdable) CMSQuery(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := []interface{}{"CMS.QUERY", key}
+ args = append(args, elements...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// TopK commands
+//--------------------------------------------
+
+// TopKAdd adds one or more elements to a Top-K filter.
+// Returns an array of strings representing the items that were removed from the filter, if any.
+// For more information - https://redis.io/commands/topk.add/
+func (c cmdable) TopKAdd(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.ADD"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKReserve creates an empty Top-K filter with the specified number of top items to keep.
+// For more information - https://redis.io/commands/topk.reserve/
+func (c cmdable) TopKReserve(ctx context.Context, key string, k int64) *StatusCmd {
+ args := []interface{}{"TOPK.RESERVE", key, k}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKReserveWithOptions creates an empty Top-K filter with the specified number of top items to keep and additional options.
+// This function allows for specifying additional options such as width, depth and decay.
+// For more information - https://redis.io/commands/topk.reserve/
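+// For example, TopKReserveWithOptions(ctx, "topk", 3, 8, 7, 0.9) issues:
+//
+//	TOPK.RESERVE topk 3 8 7 0.9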
+func (c cmdable) TopKReserveWithOptions(ctx context.Context, key string, k int64, width, depth int64, decay float64) *StatusCmd {
+ args := []interface{}{"TOPK.RESERVE", key, k, width, depth, decay}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TopKInfo struct {
+ K int64
+ Width int64
+ Depth int64
+ Decay float64
+}
+
+type TopKInfoCmd struct {
+ baseCmd
+
+ val TopKInfo
+}
+
+func NewTopKInfoCmd(ctx context.Context, args ...interface{}) *TopKInfoCmd {
+ return &TopKInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TopKInfoCmd) SetVal(val TopKInfo) {
+ cmd.val = val
+}
+
+func (cmd *TopKInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TopKInfoCmd) Val() TopKInfo {
+ return cmd.val
+}
+
+func (cmd *TopKInfoCmd) Result() (TopKInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TopKInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result TopKInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "k":
+ result.K, err = rd.ReadInt()
+ case "width":
+ result.Width, err = rd.ReadInt()
+ case "depth":
+ result.Depth, err = rd.ReadInt()
+ case "decay":
+ result.Decay, err = rd.ReadFloat()
+ default:
+ return fmt.Errorf("redis: topk.info unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// TopKInfo returns information about a Top-K filter.
+// For more information - https://redis.io/commands/topk.info/
+func (c cmdable) TopKInfo(ctx context.Context, key string) *TopKInfoCmd {
+ args := []interface{}{"TOPK.INFO", key}
+
+ cmd := NewTopKInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKQuery checks whether multiple elements exist in a Top-K filter.
+// Returns an array of booleans indicating whether each element exists in the filter or not.
+// For more information - https://redis.io/commands/topk.query/
+func (c cmdable) TopKQuery(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.QUERY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKCount returns an estimate of the number of times an item may be in a Top-K filter.
+// For more information - https://redis.io/commands/topk.count/
+func (c cmdable) TopKCount(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.COUNT"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKIncrBy increases the count of one or more items in a Top-K filter.
+// For more information - https://redis.io/commands/topk.incrby/
+func (c cmdable) TopKIncrBy(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.INCRBY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKList returns all items in the Top-K list.
+// For more information - https://redis.io/commands/topk.list/
+func (c cmdable) TopKList(ctx context.Context, key string) *StringSliceCmd {
+ args := []interface{}{"TOPK.LIST", key}
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKListWithCount returns all items in the Top-K list with their respective counts.
+// For more information - https://redis.io/commands/topk.list/
+func (c cmdable) TopKListWithCount(ctx context.Context, key string) *MapStringIntCmd {
+ args := []interface{}{"TOPK.LIST", key, "WITHCOUNT"}
+
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// t-digest commands
+// --------------------------------------------
+
+// TDigestAdd adds one or more elements to a t-Digest data structure.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.add/
+func (c cmdable) TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.ADD"
+ args[1] = key
+
+ // Convert the []float64 to []interface{}.
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
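+
+// A hedged t-digest usage sketch (assumes an initialized client rdb):
+//
+//	_ = rdb.TDigestCreate(ctx, "td").Err()
+//	_ = rdb.TDigestAdd(ctx, "td", 10.5, 11.2, 13.8).Err()
+//	qs, err := rdb.TDigestQuantile(ctx, "td", 0.5).Result() // qs[0] ≈ median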
+
+// TDigestByRank returns an array of values from a t-Digest data structure based on their rank.
+// The rank of an element is its position in the sorted list of all elements in the t-Digest.
+// Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.byrank/
+func (c cmdable) TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(rank))
+ args[0] = "TDIGEST.BYRANK"
+ args[1] = key
+
+ // Convert the []uint64 to []interface{}.
+ interfaceSlice := make([]interface{}, len(rank))
+ for i, v := range rank {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestByRevRank returns an array of values from a t-Digest data structure based on their reverse rank.
+// The reverse rank of an element is its position in the sorted list of all elements in the t-Digest when sorted in descending order.
+// Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.byrevrank/
+func (c cmdable) TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(rank))
+ args[0] = "TDIGEST.BYREVRANK"
+ args[1] = key
+
+ // Convert uint slice to []interface{}
+ interfaceSlice := make([]interface{}, len(rank))
+ for i, v := range rank {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCDF returns an array of cumulative distribution function (CDF) values for one or more elements in a t-Digest data structure.
+// The CDF value for an element is the fraction of all elements in the t-Digest that are less than or equal to it.
+// Returns an array of floats representing the CDF values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.cdf/
+func (c cmdable) TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.CDF"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCreate creates an empty t-Digest data structure with default parameters.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.create/
+func (c cmdable) TDigestCreate(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TDIGEST.CREATE", key}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCreateWithCompression creates an empty t-Digest data structure with a specified compression parameter.
+// The compression parameter controls the accuracy and memory usage of the t-Digest.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.create/
+func (c cmdable) TDigestCreateWithCompression(ctx context.Context, key string, compression int64) *StatusCmd {
+ args := []interface{}{"TDIGEST.CREATE", key, "COMPRESSION", compression}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
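+
+// A minimal t-digest workflow sketch (rdb and ctx are assumed to be supplied
+// by the caller; the key and values are illustrative):
+//
+//	rdb.TDigestCreateWithCompression(ctx, "td:latency", 100)
+//	rdb.TDigestAdd(ctx, "td:latency", 10.5, 12.3, 9.8)
+//	vals, err := rdb.TDigestQuantile(ctx, "td:latency", 0.5, 0.99).Result() // median and p99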
+
+type TDigestInfo struct {
+ Compression int64
+ Capacity int64
+ MergedNodes int64
+ UnmergedNodes int64
+ MergedWeight int64
+ UnmergedWeight int64
+ Observations int64
+ TotalCompressions int64
+ MemoryUsage int64
+}
+
+type TDigestInfoCmd struct {
+ baseCmd
+
+ val TDigestInfo
+}
+
+func NewTDigestInfoCmd(ctx context.Context, args ...interface{}) *TDigestInfoCmd {
+ return &TDigestInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TDigestInfoCmd) SetVal(val TDigestInfo) {
+ cmd.val = val
+}
+
+func (cmd *TDigestInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TDigestInfoCmd) Val() TDigestInfo {
+ return cmd.val
+}
+
+func (cmd *TDigestInfoCmd) Result() (TDigestInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TDigestInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result TDigestInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Compression":
+ result.Compression, err = rd.ReadInt()
+ case "Capacity":
+ result.Capacity, err = rd.ReadInt()
+ case "Merged nodes":
+ result.MergedNodes, err = rd.ReadInt()
+ case "Unmerged nodes":
+ result.UnmergedNodes, err = rd.ReadInt()
+ case "Merged weight":
+ result.MergedWeight, err = rd.ReadInt()
+ case "Unmerged weight":
+ result.UnmergedWeight, err = rd.ReadInt()
+ case "Observations":
+ result.Observations, err = rd.ReadInt()
+ case "Total compressions":
+ result.TotalCompressions, err = rd.ReadInt()
+ case "Memory usage":
+ result.MemoryUsage, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: tdigest.info unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// TDigestInfo returns information about a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.info/
+func (c cmdable) TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd {
+ args := []interface{}{"TDIGEST.INFO", key}
+
+ cmd := NewTDigestInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestMax returns the maximum value from a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.max/
+func (c cmdable) TDigestMax(ctx context.Context, key string) *FloatCmd {
+ args := []interface{}{"TDIGEST.MAX", key}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TDigestMergeOptions struct {
+ Compression int64
+ Override bool
+}
+
+// TDigestMerge merges multiple t-Digest data structures into a single t-Digest.
+// This function also allows for specifying additional options such as compression and override behavior.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.merge/
+func (c cmdable) TDigestMerge(ctx context.Context, destKey string, options *TDigestMergeOptions, sourceKeys ...string) *StatusCmd {
+ args := []interface{}{"TDIGEST.MERGE", destKey, len(sourceKeys)}
+
+ for _, sourceKey := range sourceKeys {
+ args = append(args, sourceKey)
+ }
+
+ if options != nil {
+ if options.Compression != 0 {
+ args = append(args, "COMPRESSION", options.Compression)
+ }
+ if options.Override {
+ args = append(args, "OVERRIDE")
+ }
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
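+
+// For example, a sketch that merges two source t-Digests into a destination,
+// overriding any existing destination key (rdb, ctx, and keys are assumed):
+//
+//	opts := &TDigestMergeOptions{Compression: 100, Override: true}
+//	rdb.TDigestMerge(ctx, "td:all", opts, "td:a", "td:b")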
+
+// TDigestMin returns the minimum value from a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.min/
+func (c cmdable) TDigestMin(ctx context.Context, key string) *FloatCmd {
+ args := []interface{}{"TDIGEST.MIN", key}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestQuantile returns an array of values from a t-Digest data structure based on the specified quantiles.
+// For each input quantile (a fraction between 0 and 1), it returns an estimate of the value that is smaller
+// than that fraction of the observations in the t-Digest.
+// Returns an array of floats representing the values at the specified quantiles or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.quantile/
+func (c cmdable) TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.QUANTILE"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestRank returns an array of rank values for one or more elements in a t-Digest data structure.
+// The rank of an element is its position in the sorted list of all elements in the t-Digest.
+// Returns an array of integers representing the rank values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.rank/
+func (c cmdable) TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "TDIGEST.RANK"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(values))
+ for i, v := range values {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestReset resets a t-Digest data structure to its initial state.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.reset/
+func (c cmdable) TDigestReset(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TDIGEST.RESET", key}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestRevRank returns an array of reverse rank values for one or more elements in a t-Digest data structure.
+// The reverse rank of an element is its position in the sorted list of all elements in the t-Digest when sorted in descending order.
+// Returns an array of integers representing the reverse rank values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.revrank/
+func (c cmdable) TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "TDIGEST.REVRANK"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(values))
+ for i, v := range values {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestTrimmedMean returns the trimmed mean value from a t-Digest data structure.
+// The trimmed mean is calculated by removing a specified fraction of the highest and lowest values from the t-Digest and then calculating the mean of the remaining values.
+// Returns a float representing the trimmed mean value or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.trimmed_mean/
+func (c cmdable) TDigestTrimmedMean(ctx context.Context, key string, lowCutQuantile, highCutQuantile float64) *FloatCmd {
+ args := []interface{}{"TDIGEST.TRIMMED_MEAN", key, lowCutQuantile, highCutQuantile}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
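+
+// For example, a sketch that averages the middle 90% of observations,
+// discarding the lowest and highest 5% (rdb, ctx, and the key are assumed):
+//
+//	mean, err := rdb.TDigestTrimmedMean(ctx, "td:latency", 0.05, 0.95).Result()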
diff --git a/vendor/github.com/redis/go-redis/v9/pubsub.go b/vendor/github.com/redis/go-redis/v9/pubsub.go
new file mode 100644
index 0000000..72b18f4
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/pubsub.go
@@ -0,0 +1,729 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. Message receiving is NOT safe
+// for concurrent use by multiple goroutines.
+//
+// PubSub automatically reconnects to Redis Server and resubscribes
+// to the channels in case of network errors.
+type PubSub struct {
+ opt *Options
+
+ newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
+ closeConn func(*pool.Conn) error
+
+ mu sync.Mutex
+ cn *pool.Conn
+ channels map[string]struct{}
+ patterns map[string]struct{}
+ schannels map[string]struct{}
+
+ closed bool
+ exit chan struct{}
+
+ cmd *Cmd
+
+ chOnce sync.Once
+ msgCh *channel
+ allCh *channel
+}
+
+func (c *PubSub) init() {
+ c.exit = make(chan struct{})
+}
+
+func (c *PubSub) String() string {
+ channels := mapKeys(c.channels)
+ channels = append(channels, mapKeys(c.patterns)...)
+ channels = append(channels, mapKeys(c.schannels)...)
+ return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
+}
+
+func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
+ c.mu.Lock()
+ cn, err := c.conn(ctx, nil)
+ c.mu.Unlock()
+ return cn, err
+}
+
+func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+ if c.cn != nil {
+ return c.cn, nil
+ }
+
+ channels := mapKeys(c.channels)
+ channels = append(channels, newChannels...)
+
+ cn, err := c.newConn(ctx, channels)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := c.resubscribe(ctx, cn); err != nil {
+ _ = c.closeConn(cn)
+ return nil, err
+ }
+
+ c.cn = cn
+ return cn, nil
+}
+
+func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
+ return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+}
+
+func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
+ var firstErr error
+
+ if len(c.channels) > 0 {
+ firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels))
+ }
+
+ if len(c.patterns) > 0 {
+ err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns))
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ if len(c.schannels) > 0 {
+ err := c._subscribe(ctx, cn, "ssubscribe", mapKeys(c.schannels))
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ return firstErr
+}
+
+func mapKeys(m map[string]struct{}) []string {
+ s := make([]string, len(m))
+ i := 0
+ for k := range m {
+ s[i] = k
+ i++
+ }
+ return s
+}
+
+func (c *PubSub) _subscribe(
+ ctx context.Context, cn *pool.Conn, redisCmd string, channels []string,
+) error {
+ args := make([]interface{}, 0, 1+len(channels))
+ args = append(args, redisCmd)
+ for _, channel := range channels {
+ args = append(args, channel)
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ return c.writeCmd(ctx, cn, cmd)
+}
+
+func (c *PubSub) releaseConnWithLock(
+ ctx context.Context,
+ cn *pool.Conn,
+ err error,
+ allowTimeout bool,
+) {
+ c.mu.Lock()
+ c.releaseConn(ctx, cn, err, allowTimeout)
+ c.mu.Unlock()
+}
+
+func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
+ if c.cn != cn {
+ return
+ }
+ if isBadConn(err, allowTimeout, c.opt.Addr) {
+ c.reconnect(ctx, err)
+ }
+}
+
+func (c *PubSub) reconnect(ctx context.Context, reason error) {
+ _ = c.closeTheCn(reason)
+ _, _ = c.conn(ctx, nil)
+}
+
+func (c *PubSub) closeTheCn(reason error) error {
+ if c.cn == nil {
+ return nil
+ }
+ if !c.closed {
+ internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
+ }
+ err := c.closeConn(c.cn)
+ c.cn = nil
+ return err
+}
+
+func (c *PubSub) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return pool.ErrClosed
+ }
+ c.closed = true
+ close(c.exit)
+
+ return c.closeTheCn(pool.ErrClosed)
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "subscribe", channels...)
+ if c.channels == nil {
+ c.channels = make(map[string]struct{})
+ }
+ for _, s := range channels {
+ c.channels[s] = struct{}{}
+ }
+ return err
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "psubscribe", patterns...)
+ if c.patterns == nil {
+ c.patterns = make(map[string]struct{})
+ }
+ for _, s := range patterns {
+ c.patterns[s] = struct{}{}
+ }
+ return err
+}
+
+// SSubscribe subscribes the client to the specified shard channels.
+func (c *PubSub) SSubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "ssubscribe", channels...)
+ if c.schannels == nil {
+ c.schannels = make(map[string]struct{})
+ }
+ for _, s := range channels {
+ c.schannels[s] = struct{}{}
+ }
+ return err
+}
+
+// Unsubscribe unsubscribes the client from the given channels, or from all of
+// them if none are given.
+func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if len(channels) > 0 {
+ for _, channel := range channels {
+ delete(c.channels, channel)
+ }
+ } else {
+ // Unsubscribe from all channels.
+ for channel := range c.channels {
+ delete(c.channels, channel)
+ }
+ }
+
+ err := c.subscribe(ctx, "unsubscribe", channels...)
+ return err
+}
+
+// PUnsubscribe unsubscribes the client from the given patterns, or from all of
+// them if none are given.
+func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if len(patterns) > 0 {
+ for _, pattern := range patterns {
+ delete(c.patterns, pattern)
+ }
+ } else {
+ // Unsubscribe from all patterns.
+ for pattern := range c.patterns {
+ delete(c.patterns, pattern)
+ }
+ }
+
+ err := c.subscribe(ctx, "punsubscribe", patterns...)
+ return err
+}
+
+// SUnsubscribe unsubscribes the client from the given shard channels,
+// or from all of them if none are given.
+func (c *PubSub) SUnsubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if len(channels) > 0 {
+ for _, channel := range channels {
+ delete(c.schannels, channel)
+ }
+ } else {
+ // Unsubscribe from all channels.
+ for channel := range c.schannels {
+ delete(c.schannels, channel)
+ }
+ }
+
+ err := c.subscribe(ctx, "sunsubscribe", channels...)
+ return err
+}
+
+func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
+ cn, err := c.conn(ctx, channels)
+ if err != nil {
+ return err
+ }
+
+ err = c._subscribe(ctx, cn, redisCmd, channels)
+ c.releaseConn(ctx, cn, err, false)
+ return err
+}
+
+func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
+ args := []interface{}{"ping"}
+ if len(payload) == 1 {
+ args = append(args, payload[0])
+ }
+ cmd := NewCmd(ctx, args...)
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ cn, err := c.conn(ctx, nil)
+ if err != nil {
+ return err
+ }
+
+ err = c.writeCmd(ctx, cn, cmd)
+ c.releaseConn(ctx, cn, err, false)
+ return err
+}
+
+// Subscription received after a successful subscription to channel.
+type Subscription struct {
+	// Can be "subscribe", "unsubscribe", "psubscribe", "punsubscribe",
+	// "ssubscribe" or "sunsubscribe".
+ Kind string
+ // Channel name we have subscribed to.
+ Channel string
+ // Number of channels we are currently subscribed to.
+ Count int
+}
+
+func (m *Subscription) String() string {
+ return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+// Message received as result of a PUBLISH command issued by another client.
+type Message struct {
+ Channel string
+ Pattern string
+ Payload string
+ PayloadSlice []string
+}
+
+func (m *Message) String() string {
+ return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+// Pong received as result of a PING command issued by another client.
+type Pong struct {
+ Payload string
+}
+
+func (p *Pong) String() string {
+ if p.Payload != "" {
+ return fmt.Sprintf("Pong<%s>", p.Payload)
+ }
+ return "Pong"
+}
+
+func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
+ switch reply := reply.(type) {
+ case string:
+ return &Pong{
+ Payload: reply,
+ }, nil
+ case []interface{}:
+ switch kind := reply[0].(string); kind {
+ case "subscribe", "unsubscribe", "psubscribe", "punsubscribe", "ssubscribe", "sunsubscribe":
+ // Can be nil in case of "unsubscribe".
+ channel, _ := reply[1].(string)
+ return &Subscription{
+ Kind: kind,
+ Channel: channel,
+ Count: int(reply[2].(int64)),
+ }, nil
+ case "message", "smessage":
+ switch payload := reply[2].(type) {
+ case string:
+ return &Message{
+ Channel: reply[1].(string),
+ Payload: payload,
+ }, nil
+ case []interface{}:
+ ss := make([]string, len(payload))
+ for i, s := range payload {
+ ss[i] = s.(string)
+ }
+ return &Message{
+ Channel: reply[1].(string),
+ PayloadSlice: ss,
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
+ }
+ case "pmessage":
+ return &Message{
+ Pattern: reply[1].(string),
+ Channel: reply[2].(string),
+ Payload: reply[3].(string),
+ }, nil
+ case "pong":
+ return &Pong{
+ Payload: reply[1].(string),
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+ }
+}
+
+// ReceiveTimeout acts like Receive but returns an error if a message
+// is not received in time. This is a low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
+ if c.cmd == nil {
+ c.cmd = NewCmd(ctx)
+ }
+
+ // Don't hold the lock to allow subscriptions and pings.
+
+ cn, err := c.connWithLock(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = cn.WithReader(context.Background(), timeout, func(rd *proto.Reader) error {
+ return c.cmd.readReply(rd)
+ })
+
+ c.releaseConnWithLock(ctx, cn, err, timeout > 0)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, Message, Pong or error.
+// See PubSub example for details. This is a low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
+ return c.ReceiveTimeout(ctx, 0)
+}
+
+// ReceiveMessage returns a Message, ignoring Subscription and Pong
+// messages. This is a low-level API and in most cases Channel should be used
+// instead.
+func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
+ for {
+ msg, err := c.Receive(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ return msg, nil
+ default:
+ err := fmt.Errorf("redis: unknown message: %T", msg)
+ return nil, err
+ }
+ }
+}
+
+func (c *PubSub) getContext() context.Context {
+ if c.cmd != nil {
+ return c.cmd.ctx
+ }
+ return context.Background()
+}
+
+//------------------------------------------------------------------------------
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// is full for 1 minute, the message is dropped.
+// Receive* APIs cannot be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and re-subscribes if a ping response is not received within 1 minute.
+func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
+ c.chOnce.Do(func() {
+ c.msgCh = newChannel(c, opts...)
+ c.msgCh.initMsgChan()
+ })
+ if c.msgCh == nil {
+ err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+ panic(err)
+ }
+ return c.msgCh.msgCh
+}
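+
+// A typical consumption loop sketch (sub is assumed to be a *PubSub returned
+// by Client.Subscribe):
+//
+//	ch := sub.Channel()
+//	for msg := range ch {
+//		fmt.Println(msg.Channel, msg.Payload)
+//	}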
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+//
+// Deprecated: use Channel(WithChannelSize(size)) instead.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+ return c.Channel(WithChannelSize(size))
+}
+
+// ChannelWithSubscriptions is like Channel, but message type can be either
+// *Subscription or *Message. Subscription messages can be used to detect
+// reconnections.
+//
+// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
+func (c *PubSub) ChannelWithSubscriptions(opts ...ChannelOption) <-chan interface{} {
+ c.chOnce.Do(func() {
+ c.allCh = newChannel(c, opts...)
+ c.allCh.initAllChan()
+ })
+ if c.allCh == nil {
+ err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
+ panic(err)
+ }
+ return c.allCh.allCh
+}
+
+type ChannelOption func(c *channel)
+
+// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
+//
+// The default is 100 messages.
+func WithChannelSize(size int) ChannelOption {
+ return func(c *channel) {
+ c.chanSize = size
+ }
+}
+
+// WithChannelHealthCheckInterval specifies the health check interval.
+// PubSub will ping the Redis server if it does not receive any messages within the interval.
+// To disable the health check, use a zero interval.
+//
+// The default is 3 seconds.
+func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
+ return func(c *channel) {
+ c.checkInterval = d
+ }
+}
+
+// WithChannelSendTimeout specifies the channel send timeout after which
+// the message is dropped.
+//
+// The default is 60 seconds.
+func WithChannelSendTimeout(d time.Duration) ChannelOption {
+ return func(c *channel) {
+ c.chanSendTimeout = d
+ }
+}
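+
+// The options can be combined in a single Channel call (sketch; sub is an
+// assumed *PubSub):
+//
+//	ch := sub.Channel(
+//		WithChannelSize(1000),
+//		WithChannelHealthCheckInterval(30*time.Second),
+//		WithChannelSendTimeout(10*time.Second),
+//	)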
+
+type channel struct {
+ pubSub *PubSub
+
+ msgCh chan *Message
+ allCh chan interface{}
+ ping chan struct{}
+
+ chanSize int
+ chanSendTimeout time.Duration
+ checkInterval time.Duration
+}
+
+func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
+ c := &channel{
+ pubSub: pubSub,
+
+ chanSize: 100,
+ chanSendTimeout: time.Minute,
+ checkInterval: 3 * time.Second,
+ }
+ for _, opt := range opts {
+ opt(c)
+ }
+ if c.checkInterval > 0 {
+ c.initHealthCheck()
+ }
+ return c
+}
+
+func (c *channel) initHealthCheck() {
+ ctx := context.TODO()
+ c.ping = make(chan struct{}, 1)
+
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ for {
+ timer.Reset(c.checkInterval)
+ select {
+ case <-c.ping:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
+ c.pubSub.mu.Lock()
+ c.pubSub.reconnect(ctx, pingErr)
+ c.pubSub.mu.Unlock()
+ }
+ case <-c.pubSub.exit:
+ return
+ }
+ }
+ }()
+}
+
+// initMsgChan must be in sync with initAllChan.
+func (c *channel) initMsgChan() {
+ ctx := context.TODO()
+ c.msgCh = make(chan *Message, c.chanSize)
+
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ var errCount int
+ for {
+ msg, err := c.pubSub.Receive(ctx)
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.msgCh)
+ return
+ }
+ if errCount > 0 {
+ time.Sleep(100 * time.Millisecond)
+ }
+ errCount++
+ continue
+ }
+
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ timer.Reset(c.chanSendTimeout)
+ select {
+ case c.msgCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ ctx, "redis: %s channel is full for %s (message is dropped)",
+ c, c.chanSendTimeout)
+ }
+ default:
+ internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
+ }
+ }
+ }()
+}
+
+// initAllChan must be in sync with initMsgChan.
+func (c *channel) initAllChan() {
+ ctx := context.TODO()
+ c.allCh = make(chan interface{}, c.chanSize)
+
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ var errCount int
+ for {
+ msg, err := c.pubSub.Receive(ctx)
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.allCh)
+ return
+ }
+ if errCount > 0 {
+ time.Sleep(100 * time.Millisecond)
+ }
+ errCount++
+ continue
+ }
+
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Pong:
+ // Ignore.
+ case *Subscription, *Message:
+ timer.Reset(c.chanSendTimeout)
+ select {
+ case c.allCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ ctx, "redis: %s channel is full for %s (message is dropped)",
+ c, c.chanSendTimeout)
+ }
+ default:
+ internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
+ }
+ }
+ }()
+}
diff --git a/vendor/github.com/redis/go-redis/v9/pubsub_commands.go b/vendor/github.com/redis/go-redis/v9/pubsub_commands.go
new file mode 100644
index 0000000..28622aa
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/pubsub_commands.go
@@ -0,0 +1,76 @@
+package redis
+
+import "context"
+
+type PubSubCmdable interface {
+ Publish(ctx context.Context, channel string, message interface{}) *IntCmd
+ SPublish(ctx context.Context, channel string, message interface{}) *IntCmd
+ PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd
+ PubSubNumPat(ctx context.Context) *IntCmd
+ PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd
+}
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "publish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SPublish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "spublish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "channels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "numsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "shardchannels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "shardnumsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "pubsub", "numpat")
+ _ = c(ctx, cmd)
+ return cmd
+}
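+
+// An introspection sketch (rdb and ctx are assumed to be supplied by the caller):
+//
+//	channels, _ := rdb.PubSubChannels(ctx, "news.*").Result() // matching active channels
+//	counts, _ := rdb.PubSubNumSub(ctx, "news.tech").Result()  // map of channel -> subscriber count
+//	patterns, _ := rdb.PubSubNumPat(ctx).Result()             // number of pattern subscriptions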
diff --git a/vendor/github.com/redis/go-redis/v9/redis.go b/vendor/github.com/redis/go-redis/v9/redis.go
new file mode 100644
index 0000000..ec3ff61
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/redis.go
@@ -0,0 +1,879 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hscan"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+// Scanner is the internal/hscan.Scanner interface exposed for external use.
+type Scanner = hscan.Scanner
+
+// Nil is the reply returned by Redis when a key does not exist.
+const Nil = proto.Nil
+
+// SetLogger sets a custom logger.
+func SetLogger(logger internal.Logging) {
+ internal.Logger = logger
+}
+
+//------------------------------------------------------------------------------
+
+type Hook interface {
+ DialHook(next DialHook) DialHook
+ ProcessHook(next ProcessHook) ProcessHook
+ ProcessPipelineHook(next ProcessPipelineHook) ProcessPipelineHook
+}
+
+type (
+ DialHook func(ctx context.Context, network, addr string) (net.Conn, error)
+ ProcessHook func(ctx context.Context, cmd Cmder) error
+ ProcessPipelineHook func(ctx context.Context, cmds []Cmder) error
+)
+
+type hooksMixin struct {
+ hooksMu *sync.RWMutex
+
+ slice []Hook
+ initial hooks
+ current hooks
+}
+
+func (hs *hooksMixin) initHooks(hooks hooks) {
+ hs.hooksMu = new(sync.RWMutex)
+ hs.initial = hooks
+ hs.chain()
+}
+
+type hooks struct {
+ dial DialHook
+ process ProcessHook
+ pipeline ProcessPipelineHook
+ txPipeline ProcessPipelineHook
+}
+
+func (h *hooks) setDefaults() {
+ if h.dial == nil {
+ h.dial = func(ctx context.Context, network, addr string) (net.Conn, error) { return nil, nil }
+ }
+ if h.process == nil {
+ h.process = func(ctx context.Context, cmd Cmder) error { return nil }
+ }
+ if h.pipeline == nil {
+ h.pipeline = func(ctx context.Context, cmds []Cmder) error { return nil }
+ }
+ if h.txPipeline == nil {
+ h.txPipeline = func(ctx context.Context, cmds []Cmder) error { return nil }
+ }
+}
+
+// AddHook adds a hook to the chain.
+// Hooks run around network connection, command execution, and pipeline execution,
+// and are applied in first-in-first-out (FIFO) order.
+// You need to execute the next hook in each hook, unless you want to terminate the execution of the command.
+// For example, you added hook-1, hook-2:
+//
+// client.AddHook(hook-1, hook-2)
+//
+// hook-1:
+//
+// func (Hook1) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
+// return func(ctx context.Context, cmd Cmder) error {
+// print("hook-1 start")
+// next(ctx, cmd)
+// print("hook-1 end")
+// return nil
+// }
+// }
+//
+// hook-2:
+//
+// func (Hook2) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
+// return func(ctx context.Context, cmd redis.Cmder) error {
+// print("hook-2 start")
+// next(ctx, cmd)
+// print("hook-2 end")
+// return nil
+// }
+// }
+//
+// The execution sequence is:
+//
+// hook-1 start -> hook-2 start -> exec redis cmd -> hook-2 end -> hook-1 end
+//
+// Please note: "next(ctx, cmd)" is very important; it calls the next hook.
+// If "next(ctx, cmd)" is not executed, the Redis command will not be executed.
+func (hs *hooksMixin) AddHook(hook Hook) {
+ hs.slice = append(hs.slice, hook)
+ hs.chain()
+}
+
+func (hs *hooksMixin) chain() {
+ hs.initial.setDefaults()
+
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+
+ hs.current.dial = hs.initial.dial
+ hs.current.process = hs.initial.process
+ hs.current.pipeline = hs.initial.pipeline
+ hs.current.txPipeline = hs.initial.txPipeline
+
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].DialHook(hs.current.dial); wrapped != nil {
+ hs.current.dial = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessHook(hs.current.process); wrapped != nil {
+ hs.current.process = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.pipeline); wrapped != nil {
+ hs.current.pipeline = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.txPipeline); wrapped != nil {
+ hs.current.txPipeline = wrapped
+ }
+ }
+}
+
+func (hs *hooksMixin) clone() hooksMixin {
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+
+ clone := *hs
+ l := len(clone.slice)
+ clone.slice = clone.slice[:l:l]
+ clone.hooksMu = new(sync.RWMutex)
+ return clone
+}
+
+func (hs *hooksMixin) withProcessHook(ctx context.Context, cmd Cmder, hook ProcessHook) error {
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].ProcessHook(hook); wrapped != nil {
+ hook = wrapped
+ }
+ }
+ return hook(ctx, cmd)
+}
+
+func (hs *hooksMixin) withProcessPipelineHook(
+ ctx context.Context, cmds []Cmder, hook ProcessPipelineHook,
+) error {
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].ProcessPipelineHook(hook); wrapped != nil {
+ hook = wrapped
+ }
+ }
+ return hook(ctx, cmds)
+}
+
+func (hs *hooksMixin) dialHook(ctx context.Context, network, addr string) (net.Conn, error) {
+ // Access to hs.current is guarded by a read-only lock since it may be mutated by AddHook(...)
+ // while this dialer is concurrently accessed by the background connection pool population
+ // routine when MinIdleConns > 0.
+ hs.hooksMu.RLock()
+ current := hs.current
+ hs.hooksMu.RUnlock()
+
+ return current.dial(ctx, network, addr)
+}
+
+func (hs *hooksMixin) processHook(ctx context.Context, cmd Cmder) error {
+ return hs.current.process(ctx, cmd)
+}
+
+func (hs *hooksMixin) processPipelineHook(ctx context.Context, cmds []Cmder) error {
+ return hs.current.pipeline(ctx, cmds)
+}
+
+func (hs *hooksMixin) processTxPipelineHook(ctx context.Context, cmds []Cmder) error {
+ return hs.current.txPipeline(ctx, cmds)
+}
+
+//------------------------------------------------------------------------------
+
+type baseClient struct {
+ opt *Options
+ connPool pool.Pooler
+
+ onClose func() error // hook called when client is closed
+}
+
+func (c *baseClient) clone() *baseClient {
+ clone := *c
+ return &clone
+}
+
+func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
+ opt := c.opt.clone()
+ opt.ReadTimeout = timeout
+ opt.WriteTimeout = timeout
+
+ clone := c.clone()
+ clone.opt = opt
+
+ return clone
+}
+
+func (c *baseClient) String() string {
+ return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.NewConn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.initConn(ctx, cn)
+ if err != nil {
+ _ = c.connPool.CloseConn(cn)
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
+ if c.opt.Limiter != nil {
+ err := c.opt.Limiter.Allow()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cn, err := c._getConn(ctx)
+ if err != nil {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if cn.Inited {
+ return cn, nil
+ }
+
+ if err := c.initConn(ctx, cn); err != nil {
+ c.connPool.Remove(ctx, cn, err)
+ if err := errors.Unwrap(err); err != nil {
+ return nil, err
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
+ if cn.Inited {
+ return nil
+ }
+ cn.Inited = true
+
+ var err error
+ username, password := c.opt.Username, c.opt.Password
+ if c.opt.CredentialsProviderContext != nil {
+ if username, password, err = c.opt.CredentialsProviderContext(ctx); err != nil {
+ return err
+ }
+ } else if c.opt.CredentialsProvider != nil {
+ username, password = c.opt.CredentialsProvider()
+ }
+
+ connPool := pool.NewSingleConnPool(c.connPool, cn)
+ conn := newConn(c.opt, connPool)
+
+ var auth bool
+ protocol := c.opt.Protocol
+	// By default, use RESP3 in the current version.
+ if protocol < 2 {
+ protocol = 3
+ }
+
+	// For redis-server versions that do not support the HELLO command,
+	// RESP2 continues to be used.
+ if err = conn.Hello(ctx, protocol, username, password, "").Err(); err == nil {
+ auth = true
+ } else if !isRedisError(err) {
+ // When the server responds with the RESP protocol and the result is not a normal
+ // execution result of the HELLO command, we consider it to be an indication that
+ // the server does not support the HELLO command.
+ // The server may be a redis-server that does not support the HELLO command,
+ // or it could be DragonflyDB or a third-party redis-proxy. They all respond
+ // with different error string results for unsupported commands, making it
+ // difficult to rely on error strings to determine all results.
+ return err
+ }
+
+ _, err = conn.Pipelined(ctx, func(pipe Pipeliner) error {
+ if !auth && password != "" {
+ if username != "" {
+ pipe.AuthACL(ctx, username, password)
+ } else {
+ pipe.Auth(ctx, password)
+ }
+ }
+
+ if c.opt.DB > 0 {
+ pipe.Select(ctx, c.opt.DB)
+ }
+
+ if c.opt.readOnly {
+ pipe.ReadOnly(ctx)
+ }
+
+ if c.opt.ClientName != "" {
+ pipe.ClientSetName(ctx, c.opt.ClientName)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if !c.opt.DisableIndentity {
+ libName := ""
+ libVer := Version()
+ if c.opt.IdentitySuffix != "" {
+ libName = c.opt.IdentitySuffix
+ }
+ p := conn.Pipeline()
+ p.ClientSetInfo(ctx, WithLibraryName(libName))
+ p.ClientSetInfo(ctx, WithLibraryVersion(libVer))
+ _, _ = p.Exec(ctx)
+ }
+
+ if c.opt.OnConnect != nil {
+ return c.opt.OnConnect(ctx, conn)
+ }
+ return nil
+}
+
+func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+
+ if isBadConn(err, false, c.opt.Addr) {
+ c.connPool.Remove(ctx, cn, err)
+ } else {
+ c.connPool.Put(ctx, cn)
+ }
+}
+
+func (c *baseClient) withConn(
+ ctx context.Context, fn func(context.Context, *pool.Conn) error,
+) error {
+ cn, err := c.getConn(ctx)
+ if err != nil {
+ return err
+ }
+
+ var fnErr error
+ defer func() {
+ c.releaseConn(ctx, cn, fnErr)
+ }()
+
+ fnErr = fn(ctx, cn)
+
+ return fnErr
+}
+
+func (c *baseClient) dial(ctx context.Context, network, addr string) (net.Conn, error) {
+ return c.opt.Dialer(ctx, network, addr)
+}
+
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ attempt := attempt
+
+ retry, err := c._process(ctx, cmd, attempt)
+ if err == nil || !retry {
+ return err
+ }
+
+ lastErr = err
+ }
+ return lastErr
+}
+
+func (c *baseClient) assertUnstableCommand(cmd Cmder) bool {
+ switch cmd.(type) {
+ case *AggregateCmd, *FTInfoCmd, *FTSpellCheckCmd, *FTSearchCmd, *FTSynDumpCmd:
+ if c.opt.UnstableResp3 {
+ return true
+ } else {
+			panic("RESP3 responses for this command are disabled because they may still change. Please set the flag UnstableResp3. See the README (https://github.com/redis/go-redis/blob/master/README.md) and the release notes for guidance.")
+ }
+ default:
+ return false
+ }
+}
+
+func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return false, err
+ }
+ }
+
+ retryTimeout := uint32(0)
+ if err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ }); err != nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ return err
+ }
+ readReplyFunc := cmd.readReply
+ // Apply unstable RESP3 search module.
+ if c.opt.Protocol != 2 && c.assertUnstableCommand(cmd) {
+ readReplyFunc = cmd.readRawReply
+ }
+ if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), readReplyFunc); err != nil {
+ if cmd.readTimeout() == nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ } else {
+ atomic.StoreUint32(&retryTimeout, 0)
+ }
+ return err
+ }
+
+ return nil
+ }); err != nil {
+ retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+ return retry, err
+ }
+
+ return false, nil
+}
+
+func (c *baseClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+ if timeout := cmd.readTimeout(); timeout != nil {
+ t := *timeout
+ if t == 0 {
+ return 0
+ }
+ return t + 10*time.Second
+ }
+ return c.opt.ReadTimeout
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+ var firstErr error
+ if c.onClose != nil {
+ if err := c.onClose(); err != nil {
+ firstErr = err
+ }
+ }
+ if err := c.connPool.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+ return c.opt.Addr
+}
+
+func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ if err := c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds); err != nil {
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ if err := c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds); err != nil {
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ // Enable retries by default to retry dial errors returned by withConn.
+ canRetry := true
+ lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ var err error
+ canRetry, err = p(ctx, cn, cmds)
+ return err
+ })
+ if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *baseClient) pipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ setCmdsErr(cmds, err)
+ return true, err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return pipelineReadCmds(rd, cmds)
+ }); err != nil {
+ return true, err
+ }
+
+ return false, nil
+}
+
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
+ for i, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+ if err != nil && !isRedisError(err) {
+ setCmdsErr(cmds[i+1:], err)
+ return err
+ }
+ }
+ // Retry errors like "LOADING redis is loading the dataset in memory".
+ return cmds[0].Err()
+}
+
+func (c *baseClient) txPipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ setCmdsErr(cmds, err)
+ return true, err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ trimmedCmds := cmds[1 : len(cmds)-1]
+
+ if err := txPipelineReadQueued(rd, statusCmd, trimmedCmds); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return pipelineReadCmds(rd, trimmedCmds)
+ }); err != nil {
+ return false, err
+ }
+
+ return false, nil
+}
+
+func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
+ // Parse +OK.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ // Parse +QUEUED.
+ for range cmds {
+ if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ if line[0] != proto.RespArray {
+ return fmt.Errorf("redis: expected '*', but got line %q", line)
+ }
+
+ return nil
+}
+
+func (c *baseClient) context(ctx context.Context) context.Context {
+ if c.opt.ContextTimeoutEnabled {
+ return ctx
+ }
+ return context.Background()
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more underlying connections.
+// It's safe for concurrent use by multiple goroutines.
+//
+// Client creates and frees connections automatically; it also maintains a free pool
+// of idle connections. You can control the pool size with the Options.PoolSize option.
+type Client struct {
+ *baseClient
+ cmdable
+ hooksMixin
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+ opt.init()
+
+ c := Client{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+ c.init()
+ c.connPool = newConnPool(opt, c.dialHook)
+
+ return &c
+}
+
+func (c *Client) init() {
+ c.cmdable = c.Process
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
+}
+
+func (c *Client) WithTimeout(timeout time.Duration) *Client {
+ clone := *c
+ clone.baseClient = c.baseClient.withTimeout(timeout)
+ clone.init()
+ return &clone
+}
+
+func (c *Client) Conn() *Conn {
+ return newConn(c.opt, pool.NewStickyConnPool(c.connPool))
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Client) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+ return c.opt
+}
+
+type PoolStats pool.Stats
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+ stats := c.connPool.Stats()
+ return (*PoolStats)(stats)
+}
+
+func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: pipelineExecer(c.processPipelineHook),
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
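+
+// A minimal MULTI/EXEC sketch (client, ctx, and the key are assumed):
+//
+//	cmds, err := client.TxPipelined(ctx, func(pipe Pipeliner) error {
+//		pipe.Incr(ctx, "counter")
+//		pipe.Expire(ctx, "counter", time.Hour)
+//		return nil
+//	})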
+
+func (c *Client) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+//	sub := client.Subscribe(ctx, "mychannel")
+//	iface, err := sub.Receive(ctx)
+// if err != nil {
+// // handle error
+// }
+//
+// // Should be *Subscription, but others are possible if other actions have been
+// // taken on sub since it was created.
+// switch iface.(type) {
+// case *Subscription:
+// // subscribe succeeded
+// case *Message:
+// // received first message
+// case *Pong:
+// // pong received
+// default:
+// // handle error
+// }
+//
+// ch := sub.Channel()
+func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// SSubscribe subscribes the client to the specified shard channels.
+// Channels can be omitted to create an empty subscription.
+func (c *Client) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.SSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+//------------------------------------------------------------------------------
+
+// Conn represents a single Redis connection rather than a pool of connections.
+// Prefer running commands from Client unless there is a specific need
+// for a continuous single Redis connection.
+type Conn struct {
+ baseClient
+ cmdable
+ statefulCmdable
+ hooksMixin
+}
+
+func newConn(opt *Options, connPool pool.Pooler) *Conn {
+ c := Conn{
+ baseClient: baseClient{
+ opt: opt,
+ connPool: connPool,
+ },
+ }
+
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
+
+ return &c
+}
+
+func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.processPipelineHook,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
diff --git a/vendor/github.com/redis/go-redis/v9/result.go b/vendor/github.com/redis/go-redis/v9/result.go
new file mode 100644
index 0000000..cfd4cf9
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/result.go
@@ -0,0 +1,188 @@
+package redis
+
+import "time"
+
+// NewCmdResult returns a Cmd initialised with val and err for testing.
+func NewCmdResult(val interface{}, err error) *Cmd {
+ var cmd Cmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewSliceResult returns a SliceCmd initialised with val and err for testing.
+func NewSliceResult(val []interface{}, err error) *SliceCmd {
+ var cmd SliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStatusResult returns a StatusCmd initialised with val and err for testing.
+func NewStatusResult(val string, err error) *StatusCmd {
+ var cmd StatusCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewIntResult returns an IntCmd initialised with val and err for testing.
+func NewIntResult(val int64, err error) *IntCmd {
+ var cmd IntCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewDurationResult returns a DurationCmd initialised with val and err for testing.
+func NewDurationResult(val time.Duration, err error) *DurationCmd {
+ var cmd DurationCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewBoolResult returns a BoolCmd initialised with val and err for testing.
+func NewBoolResult(val bool, err error) *BoolCmd {
+ var cmd BoolCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringResult returns a StringCmd initialised with val and err for testing.
+func NewStringResult(val string, err error) *StringCmd {
+ var cmd StringCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewFloatResult returns a FloatCmd initialised with val and err for testing.
+func NewFloatResult(val float64, err error) *FloatCmd {
+ var cmd FloatCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing.
+func NewStringSliceResult(val []string, err error) *StringSliceCmd {
+ var cmd StringSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing.
+func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
+ var cmd BoolSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewMapStringStringResult returns a MapStringStringCmd initialised with val and err for testing.
+func NewMapStringStringResult(val map[string]string, err error) *MapStringStringCmd {
+ var cmd MapStringStringCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewMapStringIntCmdResult returns a MapStringIntCmd initialised with val and err for testing.
+func NewMapStringIntCmdResult(val map[string]int64, err error) *MapStringIntCmd {
+ var cmd MapStringIntCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing.
+func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
+ var cmd TimeCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing.
+func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
+ var cmd ZSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing.
+func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
+ var cmd ZWithKeyCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing.
+func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
+ var cmd ScanCmd
+ cmd.page = keys
+ cmd.cursor = cursor
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing.
+func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
+ var cmd ClusterSlotsCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing.
+func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
+ var cmd GeoLocationCmd
+ cmd.locations = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing.
+func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
+ var cmd GeoPosCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing.
+func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
+ var cmd CommandsInfoCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing.
+func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
+ var cmd XMessageSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing.
+func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
+ var cmd XStreamSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXPendingResult returns a XPendingCmd initialised with val and err for testing.
+func NewXPendingResult(val *XPending, err error) *XPendingCmd {
+ var cmd XPendingCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/ring.go b/vendor/github.com/redis/go-redis/v9/ring.go
new file mode 100644
index 0000000..b402217
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/ring.go
@@ -0,0 +1,829 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/cespare/xxhash/v2"
+ "github.com/dgryski/go-rendezvous" //nolint
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/rand"
+)
+
+var errRingShardsDown = errors.New("redis: all ring shards are down")
+
+//------------------------------------------------------------------------------
+
+type ConsistentHash interface {
+ Get(string) string
+}
+
+type rendezvousWrapper struct {
+ *rendezvous.Rendezvous
+}
+
+func (w rendezvousWrapper) Get(key string) string {
+ return w.Lookup(key)
+}
+
+func newRendezvous(shards []string) ConsistentHash {
+ return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
+}
+
+//------------------------------------------------------------------------------
+
+// RingOptions are used to configure a ring client and should be
+// passed to NewRing.
+type RingOptions struct {
+ // Map of name => host:port addresses of ring shards.
+ Addrs map[string]string
+
+ // NewClient creates a shard client with provided options.
+ NewClient func(opt *Options) *Client
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+ // Frequency of PING commands sent to check shard availability.
+ // A shard is considered down after 3 consecutive failed checks.
+ HeartbeatFrequency time.Duration
+
+ // NewConsistentHash returns a consistent hash that is used
+ // to distribute keys across the shards.
+ //
+ // See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
+ // for consistent hashing algorithmic tradeoffs.
+ NewConsistentHash func(shards []string) ConsistentHash
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Protocol int
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
+
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+
+ TLSConfig *tls.Config
+ Limiter Limiter
+
+ DisableIndentity bool
+ IdentitySuffix string
+ UnstableResp3 bool
+}
+
+func (opt *RingOptions) init() {
+ if opt.NewClient == nil {
+ opt.NewClient = func(opt *Options) *Client {
+ return NewClient(opt)
+ }
+ }
+
+ if opt.HeartbeatFrequency == 0 {
+ opt.HeartbeatFrequency = 500 * time.Millisecond
+ }
+
+ if opt.NewConsistentHash == nil {
+ opt.NewConsistentHash = newRendezvous
+ }
+
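+ // MaxRetries and the backoff fields below use -1 as an explicit
+ // "disabled" sentinel, while 0 means "unset" and falls back to defaults.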
+ if opt.MaxRetries == -1 {
+ opt.MaxRetries = 0
+ } else if opt.MaxRetries == 0 {
+ opt.MaxRetries = 3
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *RingOptions) clientOptions() *Options {
+ return &Options{
+ ClientName: opt.ClientName,
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+ DB: opt.DB,
+
+ MaxRetries: -1,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+
+ TLSConfig: opt.TLSConfig,
+ Limiter: opt.Limiter,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ UnstableResp3: opt.UnstableResp3,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type ringShard struct {
+ Client *Client
+ down int32
+ addr string
+}
+
+func newRingShard(opt *RingOptions, addr string) *ringShard {
+ clopt := opt.clientOptions()
+ clopt.Addr = addr
+
+ return &ringShard{
+ Client: opt.NewClient(clopt),
+ addr: addr,
+ }
+}
+
+func (shard *ringShard) String() string {
+ var state string
+ if shard.IsUp() {
+ state = "up"
+ } else {
+ state = "down"
+ }
+ return fmt.Sprintf("%s is %s", shard.Client, state)
+}
+
+func (shard *ringShard) IsDown() bool {
+ const threshold = 3
+ return atomic.LoadInt32(&shard.down) >= threshold
+}
+
+func (shard *ringShard) IsUp() bool {
+ return !shard.IsDown()
+}
+
+// Vote votes to set the shard state and reports whether the state changed.
+func (shard *ringShard) Vote(up bool) bool {
+ if up {
+ changed := shard.IsDown()
+ atomic.StoreInt32(&shard.down, 0)
+ return changed
+ }
+
+ if shard.IsDown() {
+ return false
+ }
+
+ atomic.AddInt32(&shard.down, 1)
+ return shard.IsDown()
+}
+
+//------------------------------------------------------------------------------
+
+type ringSharding struct {
+ opt *RingOptions
+
+ mu sync.RWMutex
+ shards *ringShards
+ closed bool
+ hash ConsistentHash
+ numShard int
+ onNewNode []func(rdb *Client)
+
+ // ensures exclusive access to SetAddrs so there is no need
+ // to hold mu for the duration of potentially long shard creation
+ setAddrsMu sync.Mutex
+}
+
+type ringShards struct {
+ m map[string]*ringShard
+ list []*ringShard
+}
+
+func newRingSharding(opt *RingOptions) *ringSharding {
+ c := &ringSharding{
+ opt: opt,
+ }
+ c.SetAddrs(opt.Addrs)
+
+ return c
+}
+
+func (c *ringSharding) OnNewNode(fn func(rdb *Client)) {
+ c.mu.Lock()
+ c.onNewNode = append(c.onNewNode, fn)
+ c.mu.Unlock()
+}
+
+// SetAddrs replaces the shards in use so that the number of shards can be
+// increased or decreased. Shards that existed before are reused, and shards
+// that are no longer needed are closed.
+func (c *ringSharding) SetAddrs(addrs map[string]string) {
+ c.setAddrsMu.Lock()
+ defer c.setAddrsMu.Unlock()
+
+ cleanup := func(shards map[string]*ringShard) {
+ for addr, shard := range shards {
+ if err := shard.Client.Close(); err != nil {
+ internal.Logger.Printf(context.Background(), "shard.Close %s failed: %s", addr, err)
+ }
+ }
+ }
+
+ c.mu.RLock()
+ if c.closed {
+ c.mu.RUnlock()
+ return
+ }
+ existing := c.shards
+ c.mu.RUnlock()
+
+ shards, created, unused := c.newRingShards(addrs, existing)
+
+ c.mu.Lock()
+ if c.closed {
+ cleanup(created)
+ c.mu.Unlock()
+ return
+ }
+ c.shards = shards
+ c.rebalanceLocked()
+ c.mu.Unlock()
+
+ cleanup(unused)
+}
+
+func (c *ringSharding) newRingShards(
+ addrs map[string]string, existing *ringShards,
+) (shards *ringShards, created, unused map[string]*ringShard) {
+ shards = &ringShards{m: make(map[string]*ringShard, len(addrs))}
+ created = make(map[string]*ringShard) // indexed by addr
+ unused = make(map[string]*ringShard) // indexed by addr
+
+ if existing != nil {
+ for _, shard := range existing.list {
+ unused[shard.addr] = shard
+ }
+ }
+
+ for name, addr := range addrs {
+ if shard, ok := unused[addr]; ok {
+ shards.m[name] = shard
+ delete(unused, addr)
+ } else {
+ shard := newRingShard(c.opt, addr)
+ shards.m[name] = shard
+ created[addr] = shard
+
+ for _, fn := range c.onNewNode {
+ fn(shard.Client)
+ }
+ }
+ }
+
+ for _, shard := range shards.m {
+ shards.list = append(shards.list, shard)
+ }
+
+ return
+}
+
+func (c *ringSharding) List() []*ringShard {
+ var list []*ringShard
+
+ c.mu.RLock()
+ if !c.closed {
+ list = c.shards.list
+ }
+ c.mu.RUnlock()
+
+ return list
+}
+
+func (c *ringSharding) Hash(key string) string {
+ key = hashtag.Key(key)
+
+ var hash string
+
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.numShard > 0 {
+ hash = c.hash.Get(key)
+ }
+
+ return hash
+}
+
+func (c *ringSharding) GetByKey(key string) (*ringShard, error) {
+ key = hashtag.Key(key)
+
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ if c.numShard == 0 {
+ return nil, errRingShardsDown
+ }
+
+ shardName := c.hash.Get(key)
+ if shardName == "" {
+ return nil, errRingShardsDown
+ }
+ return c.shards.m[shardName], nil
+}
+
+func (c *ringSharding) GetByName(shardName string) (*ringShard, error) {
+ if shardName == "" {
+ return c.Random()
+ }
+
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ return c.shards.m[shardName], nil
+}
+
+func (c *ringSharding) Random() (*ringShard, error) {
+ return c.GetByKey(strconv.Itoa(rand.Int()))
+}
+
+// Heartbeat monitors state of each shard in the ring.
+func (c *ringSharding) Heartbeat(ctx context.Context, frequency time.Duration) {
+ ticker := time.NewTicker(frequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ var rebalance bool
+
+ for _, shard := range c.List() {
+ err := shard.Client.Ping(ctx).Err()
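+ // A pool timeout means the shard is saturated, not unreachable, so it
+ // still counts as up.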
+ isUp := err == nil || err == pool.ErrPoolTimeout
+ if shard.Vote(isUp) {
+ internal.Logger.Printf(ctx, "ring shard state changed: %s", shard)
+ rebalance = true
+ }
+ }
+
+ if rebalance {
+ c.mu.Lock()
+ c.rebalanceLocked()
+ c.mu.Unlock()
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+// rebalanceLocked removes dead shards from the Ring.
+// Requires c.mu locked.
+func (c *ringSharding) rebalanceLocked() {
+ if c.closed {
+ return
+ }
+ if c.shards == nil {
+ return
+ }
+
+ liveShards := make([]string, 0, len(c.shards.m))
+
+ for name, shard := range c.shards.m {
+ if shard.IsUp() {
+ liveShards = append(liveShards, name)
+ }
+ }
+
+ c.hash = c.opt.NewConsistentHash(liveShards)
+ c.numShard = len(liveShards)
+}
+
+func (c *ringSharding) Len() int {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ return c.numShard
+}
+
+func (c *ringSharding) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+
+ for _, shard := range c.shards.list {
+ if err := shard.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ c.hash = nil
+ c.shards = nil
+ c.numShard = 0
+
+ return firstErr
+}
+
+//------------------------------------------------------------------------------
+
+// Ring is a Redis client that uses consistent hashing to distribute
+// keys across multiple Redis servers (shards). It's safe for
+// concurrent use by multiple goroutines.
+//
+// Ring monitors the state of each shard and removes dead shards from
+// the ring. When a shard comes online it is added back to the ring. This
+// gives you maximum availability and partition tolerance, but no
+// consistency between different shards or even clients. Each client
+// uses shards that are available to the client and does not do any
+// coordination when shard state is changed.
+//
+// Ring should be used when you need multiple Redis servers for caching
+// and can tolerate losing data when one of the servers dies.
+// Otherwise you should use Redis Cluster.
+type Ring struct {
+ cmdable
+ hooksMixin
+
+ opt *RingOptions
+ sharding *ringSharding
+ cmdsInfoCache *cmdsInfoCache
+ heartbeatCancelFn context.CancelFunc
+}
+
+func NewRing(opt *RingOptions) *Ring {
+ opt.init()
+
+ hbCtx, hbCancel := context.WithCancel(context.Background())
+
+ ring := Ring{
+ opt: opt,
+ sharding: newRingSharding(opt),
+ heartbeatCancelFn: hbCancel,
+ }
+
+ ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
+ ring.cmdable = ring.Process
+
+ ring.initHooks(hooks{
+ process: ring.process,
+ pipeline: func(ctx context.Context, cmds []Cmder) error {
+ return ring.generalProcessPipeline(ctx, cmds, false)
+ },
+ txPipeline: func(ctx context.Context, cmds []Cmder) error {
+ return ring.generalProcessPipeline(ctx, cmds, true)
+ },
+ })
+
+ go ring.sharding.Heartbeat(hbCtx, opt.HeartbeatFrequency)
+
+ return &ring
+}
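+
+// Example (illustrative sketch; addresses and shard names are hypothetical):
+// a two-shard ring.
+//
+//	rdb := redis.NewRing(&redis.RingOptions{
+//		Addrs: map[string]string{
+//			"shard1": "localhost:6379",
+//			"shard2": "localhost:6380",
+//		},
+//	})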
+
+func (c *Ring) SetAddrs(addrs map[string]string) {
+ c.sharding.SetAddrs(addrs)
+}
+
+// Do creates a Cmd from the args and processes it.
+func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Ring) Options() *RingOptions {
+ return c.opt
+}
+
+func (c *Ring) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *Ring) PoolStats() *PoolStats {
+ shards := c.sharding.List()
+ var acc PoolStats
+ for _, shard := range shards {
+ s := shard.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ }
+ return &acc
+}
+
+// Len returns the current number of shards in the ring.
+func (c *Ring) Len() int {
+ return c.sharding.Len()
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.sharding.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.Subscribe(ctx, channels...)
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.sharding.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.PSubscribe(ctx, channels...)
+}
+
+// SSubscribe subscribes the client to the specified shard channels.
+func (c *Ring) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+ shard, err := c.sharding.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.SSubscribe(ctx, channels...)
+}
+
+func (c *Ring) OnNewNode(fn func(rdb *Client)) {
+ c.sharding.OnNewNode(fn)
+}
+
+// ForEachShard concurrently calls the fn on each live shard in the ring.
+// It returns the first error if any.
+func (c *Ring) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ shards := c.sharding.List()
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ for _, shard := range shards {
+ if shard.IsDown() {
+ continue
+ }
+
+ wg.Add(1)
+ go func(shard *ringShard) {
+ defer wg.Done()
+ err := fn(ctx, shard.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(shard)
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ shards := c.sharding.List()
+ var firstErr error
+ for _, shard := range shards {
+ cmdsInfo, err := shard.Client.Command(ctx).Result()
+ if err == nil {
+ return cmdsInfo, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ if firstErr == nil {
+ return nil, errRingShardsDown
+ }
+ return nil, firstErr
+}
+
+func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
+ pos := cmdFirstKeyPos(cmd)
+ if pos == 0 {
+ return c.sharding.Random()
+ }
+ firstKey := cmd.stringArg(pos)
+ return c.sharding.GetByKey(firstKey)
+}
+
+func (c *Ring) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ shard, err := c.cmdShard(ctx, cmd)
+ if err != nil {
+ return err
+ }
+
+ lastErr = shard.Client.Process(ctx, cmd)
+ if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: pipelineExecer(c.processPipelineHook),
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, tx bool,
+) error {
+ if tx {
+ // Trim multi .. exec.
+ cmds = cmds[1 : len(cmds)-1]
+ }
+
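+ // Group commands by shard hash so each shard receives a single pipeline.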
+ cmdsMap := make(map[string][]Cmder)
+
+ for _, cmd := range cmds {
+ hash := cmd.stringArg(cmdFirstKeyPos(cmd))
+ if hash != "" {
+ hash = c.sharding.Hash(hash)
+ }
+ cmdsMap[hash] = append(cmdsMap[hash], cmd)
+ }
+
+ var wg sync.WaitGroup
+ for hash, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(hash string, cmds []Cmder) {
+ defer wg.Done()
+
+ // TODO: retry?
+ shard, err := c.sharding.GetByName(hash)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return
+ }
+
+ if tx {
+ cmds = wrapMultiExec(ctx, cmds)
+ _ = shard.Client.processTxPipelineHook(ctx, cmds)
+ } else {
+ _ = shard.Client.processPipelineHook(ctx, cmds)
+ }
+ }(hash, cmds)
+ }
+
+ wg.Wait()
+ return cmdsFirstErr(cmds)
+}
+
+func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ var shards []*ringShard
+
+ for _, key := range keys {
+ if key != "" {
+ shard, err := c.sharding.GetByKey(hashtag.Key(key))
+ if err != nil {
+ return err
+ }
+
+ shards = append(shards, shard)
+ }
+ }
+
+ if len(shards) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one shard")
+ }
+
+ if len(shards) > 1 {
+ for _, shard := range shards[1:] {
+ if shard.Client != shards[0].Client {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
+ return err
+ }
+ }
+ }
+
+ return shards[0].Client.Watch(ctx, fn, keys...)
+}
+
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+ c.heartbeatCancelFn()
+
+ return c.sharding.Close()
+}
diff --git a/vendor/github.com/redis/go-redis/v9/script.go b/vendor/github.com/redis/go-redis/v9/script.go
new file mode 100644
index 0000000..626ab03
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/script.go
@@ -0,0 +1,84 @@
+package redis
+
+import (
+ "context"
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+)
+
+type Scripter interface {
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+}
+
+var (
+ _ Scripter = (*Client)(nil)
+ _ Scripter = (*Ring)(nil)
+ _ Scripter = (*ClusterClient)(nil)
+)
+
+type Script struct {
+ src, hash string
+}
+
+func NewScript(src string) *Script {
+ h := sha1.New()
+ _, _ = io.WriteString(h, src)
+ return &Script{
+ src: src,
+ hash: hex.EncodeToString(h.Sum(nil)),
+ }
+}
+
+func (s *Script) Hash() string {
+ return s.hash
+}
+
+func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
+ return c.ScriptLoad(ctx, s.src)
+}
+
+func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
+ return c.ScriptExists(ctx, s.hash)
+}
+
+func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.Eval(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalRO(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalSha(ctx, s.hash, keys, args...)
+}
+
+func (s *Script) EvalShaRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalShaRO(ctx, s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not
+// exist, it is retried using EVAL.
+func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalSha(ctx, c, keys, args...)
+ if HasErrorPrefix(r.Err(), "NOSCRIPT") {
+ return s.Eval(ctx, c, keys, args...)
+ }
+ return r
+}
+
+// RunRO optimistically uses EVALSHA_RO to run the script. If the script does
+// not exist, it is retried using EVAL_RO.
+func (s *Script) RunRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalShaRO(ctx, c, keys, args...)
+ if HasErrorPrefix(r.Err(), "NOSCRIPT") {
+ return s.EvalRO(ctx, c, keys, args...)
+ }
+ return r
+}
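+
+// Example (illustrative sketch; the key name is hypothetical): Run falls back
+// to EVAL when the script is not yet cached on the server.
+//
+//	var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)
+//	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 42).Int64()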
diff --git a/vendor/github.com/redis/go-redis/v9/scripting_commands.go b/vendor/github.com/redis/go-redis/v9/scripting_commands.go
new file mode 100644
index 0000000..af9c339
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/scripting_commands.go
@@ -0,0 +1,215 @@
+package redis
+
+import "context"
+
+type ScriptingFunctionsCmdable interface {
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptFlush(ctx context.Context) *StatusCmd
+ ScriptKill(ctx context.Context) *StatusCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+
+ FunctionLoad(ctx context.Context, code string) *StringCmd
+ FunctionLoadReplace(ctx context.Context, code string) *StringCmd
+ FunctionDelete(ctx context.Context, libName string) *StringCmd
+ FunctionFlush(ctx context.Context) *StringCmd
+ FunctionKill(ctx context.Context) *StringCmd
+ FunctionFlushAsync(ctx context.Context) *StringCmd
+ FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd
+ FunctionDump(ctx context.Context) *StringCmd
+ FunctionRestore(ctx context.Context, libDump string) *StringCmd
+ FunctionStats(ctx context.Context) *FunctionStatsCmd
+ FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+}
+
+func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "eval", script, keys, args...)
+}
+
+func (c cmdable) EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "eval_ro", script, keys, args...)
+}
+
+func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "evalsha", sha1, keys, args...)
+}
+
+func (c cmdable) EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "evalsha_ro", sha1, keys, args...)
+}
+
+func (c cmdable) eval(ctx context.Context, name, payload string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = name
+ cmdArgs[1] = payload
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+
+ // It is possible that only args exist, without any keys:
+ // rdb.Eval(ctx, script, nil, arg1, arg2)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
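+
+// For example (illustrative), eval(ctx, "eval", script, []string{"k1", "k2"}, 42)
+// builds the wire command: EVAL <script> 2 k1 k2 42.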
+
+func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ------------------------------------------------------------------------------
+
+// FunctionListQuery is used with FunctionList to query for Redis libraries.
+//
+// LibraryNamePattern:
+//   - Use an empty string to get all libraries.
+//   - Use a glob-style pattern to match multiple libraries by name.
+//   - Use a library's full name to match a single library.
+//
+// WithCode - If true, the code of the library is returned as well.
+type FunctionListQuery struct {
+ LibraryNamePattern string
+ WithCode bool
+}
+
+func (c cmdable) FunctionLoad(ctx context.Context, code string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "load", code)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionLoadReplace(ctx context.Context, code string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "load", "replace", code)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionDelete(ctx context.Context, libName string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "delete", libName)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionFlush(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionKill(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionFlushAsync(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "flush", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd {
+ args := make([]interface{}, 2, 5)
+ args[0] = "function"
+ args[1] = "list"
+ if q.LibraryNamePattern != "" {
+ args = append(args, "libraryname", q.LibraryNamePattern)
+ }
+ if q.WithCode {
+ args = append(args, "withcode")
+ }
+ cmd := NewFunctionListCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
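+
+// Example (illustrative sketch; the pattern is hypothetical and the usual
+// Result accessor on FunctionListCmd is assumed):
+//
+//	libs, err := rdb.FunctionList(ctx, redis.FunctionListQuery{
+//		LibraryNamePattern: "mylib*",
+//		WithCode:           true,
+//	}).Result()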
+
+func (c cmdable) FunctionDump(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "dump")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionRestore(ctx context.Context, libDump string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "restore", libDump)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionStats(ctx context.Context) *FunctionStatsCmd {
+ cmd := NewFunctionStatsCmd(ctx, "function", "stats")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := fcallArgs("fcall", function, keys, args...)
+ cmd := NewCmd(ctx, cmdArgs...)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FCallRo simply calls FCallRO.
+//
+// Deprecated: Use FCallRO instead, which follows the naming convention.
+func (c cmdable) FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ return c.FCallRO(ctx, function, keys, args...)
+}
+
+func (c cmdable) FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := fcallArgs("fcall_ro", function, keys, args...)
+ cmd := NewCmd(ctx, cmdArgs...)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func fcallArgs(command string, function string, keys []string, args ...interface{}) []interface{} {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = command
+ cmdArgs[1] = function
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+
+ cmdArgs = append(cmdArgs, args...)
+ return cmdArgs
+}
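+
+// For example (illustrative), fcallArgs("fcall", "myfunc", []string{"k1"}, 42)
+// yields the wire command: FCALL myfunc 1 k1 42.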
diff --git a/vendor/github.com/redis/go-redis/v9/search_commands.go b/vendor/github.com/redis/go-redis/v9/search_commands.go
new file mode 100644
index 0000000..9359a72
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/search_commands.go
@@ -0,0 +1,2268 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+type SearchCmdable interface {
+ FT_List(ctx context.Context) *StringSliceCmd
+ FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd
+ FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd
+ FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd
+ FTAliasDel(ctx context.Context, alias string) *StatusCmd
+ FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd
+ FTAlter(ctx context.Context, index string, skipInitialScan bool, definition []interface{}) *StatusCmd
+ FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd
+ FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd
+ FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd
+ FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd
+ FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd
+ FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd
+ FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd
+ FTDictDump(ctx context.Context, dict string) *StringSliceCmd
+ FTDropIndex(ctx context.Context, index string) *StatusCmd
+ FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd
+ FTExplain(ctx context.Context, index string, query string) *StringCmd
+ FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd
+ FTInfo(ctx context.Context, index string) *FTInfoCmd
+ FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd
+ FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd
+ FTSearch(ctx context.Context, index string, query string) *FTSearchCmd
+ FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd
+ FTSynDump(ctx context.Context, index string) *FTSynDumpCmd
+ FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd
+ FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd
+ FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd
+}
+
+type FTCreateOptions struct {
+ OnHash bool
+ OnJSON bool
+ Prefix []interface{}
+ Filter string
+ DefaultLanguage string
+ LanguageField string
+ Score float64
+ ScoreField string
+ PayloadField string
+ MaxTextFields int
+ NoOffsets bool
+ Temporary int
+ NoHL bool
+ NoFields bool
+ NoFreqs bool
+ StopWords []interface{}
+ SkipInitialScan bool
+}
+
+type FieldSchema struct {
+ FieldName string
+ As string
+ FieldType SearchFieldType
+ Sortable bool
+ UNF bool
+ NoStem bool
+ NoIndex bool
+ PhoneticMatcher string
+ Weight float64
+ Separator string
+ CaseSensitive bool
+ WithSuffixtrie bool
+ VectorArgs *FTVectorArgs
+ GeoShapeFieldType string
+ IndexEmpty bool
+ IndexMissing bool
+}
+
+type FTVectorArgs struct {
+ FlatOptions *FTFlatOptions
+ HNSWOptions *FTHNSWOptions
+}
+
+type FTFlatOptions struct {
+ Type string
+ Dim int
+ DistanceMetric string
+ InitialCapacity int
+ BlockSize int
+}
+
+type FTHNSWOptions struct {
+ Type string
+ Dim int
+ DistanceMetric string
+ InitialCapacity int
+ MaxEdgesPerNode int
+ MaxAllowedEdgesPerNode int
+ EFRunTime int
+ Epsilon float64
+}
+
+type FTDropIndexOptions struct {
+ DeleteDocs bool
+}
+
+type SpellCheckTerms struct {
+ Include bool
+ Exclude bool
+ Dictionary string
+}
+
+type FTExplainOptions struct {
+ Dialect string
+}
+
+type FTSynUpdateOptions struct {
+ SkipInitialScan bool
+}
+
+type SearchAggregator int
+
+const (
+ SearchInvalid = SearchAggregator(iota)
+ SearchAvg
+ SearchSum
+ SearchMin
+ SearchMax
+ SearchCount
+ SearchCountDistinct
+ SearchCountDistinctish
+ SearchStdDev
+ SearchQuantile
+ SearchToList
+ SearchFirstValue
+ SearchRandomSample
+)
+
+func (a SearchAggregator) String() string {
+ switch a {
+ case SearchInvalid:
+ return ""
+ case SearchAvg:
+ return "AVG"
+ case SearchSum:
+ return "SUM"
+ case SearchMin:
+ return "MIN"
+ case SearchMax:
+ return "MAX"
+ case SearchCount:
+ return "COUNT"
+ case SearchCountDistinct:
+ return "COUNT_DISTINCT"
+ case SearchCountDistinctish:
+ return "COUNT_DISTINCTISH"
+ case SearchStdDev:
+ return "STDDEV"
+ case SearchQuantile:
+ return "QUANTILE"
+ case SearchToList:
+ return "TOLIST"
+ case SearchFirstValue:
+ return "FIRST_VALUE"
+ case SearchRandomSample:
+ return "RANDOM_SAMPLE"
+ default:
+ return ""
+ }
+}
+
+type SearchFieldType int
+
+const (
+ SearchFieldTypeInvalid = SearchFieldType(iota)
+ SearchFieldTypeNumeric
+ SearchFieldTypeTag
+ SearchFieldTypeText
+ SearchFieldTypeGeo
+ SearchFieldTypeVector
+ SearchFieldTypeGeoShape
+)
+
+func (t SearchFieldType) String() string {
+ switch t {
+ case SearchFieldTypeInvalid:
+ return ""
+ case SearchFieldTypeNumeric:
+ return "NUMERIC"
+ case SearchFieldTypeTag:
+ return "TAG"
+ case SearchFieldTypeText:
+ return "TEXT"
+ case SearchFieldTypeGeo:
+ return "GEO"
+ case SearchFieldTypeVector:
+ return "VECTOR"
+ case SearchFieldTypeGeoShape:
+ return "GEOSHAPE"
+ default:
+ return "TEXT"
+ }
+}
+
+// Each AggregateReducer takes different args; see
+// https://redis.io/docs/interact/search-and-query/search/aggregations/#supported-groupby-reducers
+// for details.
+type FTAggregateReducer struct {
+ Reducer SearchAggregator
+ Args []interface{}
+ As string
+}
+
+type FTAggregateGroupBy struct {
+ Fields []interface{}
+ Reduce []FTAggregateReducer
+}
+
+type FTAggregateSortBy struct {
+ FieldName string
+ Asc bool
+ Desc bool
+}
+
+type FTAggregateApply struct {
+ Field string
+ As string
+}
+
+type FTAggregateLoad struct {
+ Field string
+ As string
+}
+
+type FTAggregateWithCursor struct {
+ Count int
+ MaxIdle int
+}
+
+type FTAggregateOptions struct {
+ Verbatim bool
+ LoadAll bool
+ Load []FTAggregateLoad
+ Timeout int
+ GroupBy []FTAggregateGroupBy
+ SortBy []FTAggregateSortBy
+ SortByMax int
+ Scorer string
+ AddScores bool
+ Apply []FTAggregateApply
+ LimitOffset int
+ Limit int
+ Filter string
+ WithCursor bool
+ WithCursorOptions *FTAggregateWithCursor
+ Params map[string]interface{}
+ DialectVersion int
+}
+
+type FTSearchFilter struct {
+ FieldName interface{}
+ Min interface{}
+ Max interface{}
+}
+
+type FTSearchGeoFilter struct {
+ FieldName string
+ Longitude float64
+ Latitude float64
+ Radius float64
+ Unit string
+}
+
+type FTSearchReturn struct {
+ FieldName string
+ As string
+}
+
+type FTSearchSortBy struct {
+ FieldName string
+ Asc bool
+ Desc bool
+}
+
+type FTSearchOptions struct {
+ NoContent bool
+ Verbatim bool
+ NoStopWords bool
+ WithScores bool
+ WithPayloads bool
+ WithSortKeys bool
+ Filters []FTSearchFilter
+ GeoFilter []FTSearchGeoFilter
+ InKeys []interface{}
+ InFields []interface{}
+ Return []FTSearchReturn
+ Slop int
+ Timeout int
+ InOrder bool
+ Language string
+ Expander string
+ Scorer string
+ ExplainScore bool
+ Payload string
+ SortBy []FTSearchSortBy
+ SortByWithCount bool
+ LimitOffset int
+ Limit int
+ Params map[string]interface{}
+ DialectVersion int
+}
+
+type FTSynDumpResult struct {
+ Term string
+ Synonyms []string
+}
+
+type FTSynDumpCmd struct {
+ baseCmd
+ val []FTSynDumpResult
+}
+
+type FTAggregateResult struct {
+ Total int
+ Rows []AggregateRow
+}
+
+type AggregateRow struct {
+ Fields map[string]interface{}
+}
+
+type AggregateCmd struct {
+ baseCmd
+ val *FTAggregateResult
+}
+
+type FTInfoResult struct {
+ IndexErrors IndexErrors
+ Attributes []FTAttribute
+ BytesPerRecordAvg string
+ Cleaning int
+ CursorStats CursorStats
+ DialectStats map[string]int
+ DocTableSizeMB float64
+ FieldStatistics []FieldStatistic
+ GCStats GCStats
+ GeoshapesSzMB float64
+ HashIndexingFailures int
+ IndexDefinition IndexDefinition
+ IndexName string
+ IndexOptions []string
+ Indexing int
+ InvertedSzMB float64
+ KeyTableSizeMB float64
+ MaxDocID int
+ NumDocs int
+ NumRecords int
+ NumTerms int
+ NumberOfUses int
+ OffsetBitsPerRecordAvg string
+ OffsetVectorsSzMB float64
+ OffsetsPerTermAvg string
+ PercentIndexed float64
+ RecordsPerDocAvg string
+ SortableValuesSizeMB float64
+ TagOverheadSzMB float64
+ TextOverheadSzMB float64
+ TotalIndexMemorySzMB float64
+ TotalIndexingTime int
+ TotalInvertedIndexBlocks int
+ VectorIndexSzMB float64
+}
+
+type IndexErrors struct {
+ IndexingFailures int
+ LastIndexingError string
+ LastIndexingErrorKey string
+}
+
+type FTAttribute struct {
+ Identifier string
+ Attribute string
+ Type string
+ Weight float64
+ Sortable bool
+ NoStem bool
+ NoIndex bool
+ UNF bool
+ PhoneticMatcher string
+ CaseSensitive bool
+ WithSuffixtrie bool
+}
+
+type CursorStats struct {
+ GlobalIdle int
+ GlobalTotal int
+ IndexCapacity int
+ IndexTotal int
+}
+
+type FieldStatistic struct {
+ Identifier string
+ Attribute string
+ IndexErrors IndexErrors
+}
+
+type GCStats struct {
+ BytesCollected int
+ TotalMsRun int
+ TotalCycles int
+ AverageCycleTimeMs string
+ LastRunTimeMs int
+ GCNumericTreesMissed int
+ GCBlocksDenied int
+}
+
+type IndexDefinition struct {
+ KeyType string
+ Prefixes []string
+ DefaultScore float64
+}
+
+type FTSpellCheckOptions struct {
+ Distance int
+ Terms *FTSpellCheckTerms
+ Dialect int
+}
+
+type FTSpellCheckTerms struct {
+ Inclusion string // Either "INCLUDE" or "EXCLUDE"
+ Dictionary string
+ Terms []interface{}
+}
+
+type SpellCheckResult struct {
+ Term string
+ Suggestions []SpellCheckSuggestion
+}
+
+type SpellCheckSuggestion struct {
+ Score float64
+ Suggestion string
+}
+
+type FTSearchResult struct {
+ Total int
+ Docs []Document
+}
+
+type Document struct {
+ ID string
+ Score *float64
+ Payload *string
+ SortKey *string
+ Fields map[string]string
+}
+
+type AggregateQuery []interface{}
+
+// FT_List - Lists all the existing indexes in the database.
+// For more information, please refer to the Redis documentation:
+// [FT._LIST]: (https://redis.io/commands/ft._list/)
+func (c cmdable) FT_List(ctx context.Context) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "FT._LIST")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTAggregate - Performs a search query on an index and applies a series of aggregate transformations to the result.
+// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query.
+// For more information, please refer to the Redis documentation:
+// [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/)
+func (c cmdable) FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd {
+ args := []interface{}{"FT.AGGREGATE", index, query}
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func FTAggregateQuery(query string, options *FTAggregateOptions) AggregateQuery {
+ queryArgs := []interface{}{query}
+ if options != nil {
+ if options.Verbatim {
+ queryArgs = append(queryArgs, "VERBATIM")
+ }
+
+ if options.Scorer != "" {
+ queryArgs = append(queryArgs, "SCORER", options.Scorer)
+ }
+
+ if options.AddScores {
+ queryArgs = append(queryArgs, "ADDSCORES")
+ }
+
+ if options.LoadAll && options.Load != nil {
+ panic("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive")
+ }
+ if options.LoadAll {
+ queryArgs = append(queryArgs, "LOAD", "*")
+ }
+ if options.Load != nil {
+ queryArgs = append(queryArgs, "LOAD", len(options.Load))
+ index, count := len(queryArgs)-1, 0
+ for _, load := range options.Load {
+ queryArgs = append(queryArgs, load.Field)
+ count++
+ if load.As != "" {
+ queryArgs = append(queryArgs, "AS", load.As)
+ count += 2
+ }
+ }
+ queryArgs[index] = count
+ }
+
+ if options.Timeout > 0 {
+ queryArgs = append(queryArgs, "TIMEOUT", options.Timeout)
+ }
+
+ for _, apply := range options.Apply {
+ queryArgs = append(queryArgs, "APPLY", apply.Field)
+ if apply.As != "" {
+ queryArgs = append(queryArgs, "AS", apply.As)
+ }
+ }
+
+ if options.GroupBy != nil {
+ for _, groupBy := range options.GroupBy {
+ queryArgs = append(queryArgs, "GROUPBY", len(groupBy.Fields))
+ queryArgs = append(queryArgs, groupBy.Fields...)
+
+ for _, reducer := range groupBy.Reduce {
+ queryArgs = append(queryArgs, "REDUCE")
+ queryArgs = append(queryArgs, reducer.Reducer.String())
+ if reducer.Args != nil {
+ queryArgs = append(queryArgs, len(reducer.Args))
+ queryArgs = append(queryArgs, reducer.Args...)
+ } else {
+ queryArgs = append(queryArgs, 0)
+ }
+ if reducer.As != "" {
+ queryArgs = append(queryArgs, "AS", reducer.As)
+ }
+ }
+ }
+ }
+ if options.SortBy != nil {
+ queryArgs = append(queryArgs, "SORTBY")
+ sortByOptions := []interface{}{}
+ for _, sortBy := range options.SortBy {
+ sortByOptions = append(sortByOptions, sortBy.FieldName)
+ if sortBy.Asc && sortBy.Desc {
+ panic("FT.AGGREGATE: ASC and DESC are mutually exclusive")
+ }
+ if sortBy.Asc {
+ sortByOptions = append(sortByOptions, "ASC")
+ }
+ if sortBy.Desc {
+ sortByOptions = append(sortByOptions, "DESC")
+ }
+ }
+ queryArgs = append(queryArgs, len(sortByOptions))
+ queryArgs = append(queryArgs, sortByOptions...)
+ }
+ if options.SortByMax > 0 {
+ queryArgs = append(queryArgs, "MAX", options.SortByMax)
+ }
+ if options.LimitOffset >= 0 && options.Limit > 0 {
+ queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit)
+ }
+ if options.Filter != "" {
+ queryArgs = append(queryArgs, "FILTER", options.Filter)
+ }
+ if options.WithCursor {
+ queryArgs = append(queryArgs, "WITHCURSOR")
+ if options.WithCursorOptions != nil {
+ if options.WithCursorOptions.Count > 0 {
+ queryArgs = append(queryArgs, "COUNT", options.WithCursorOptions.Count)
+ }
+ if options.WithCursorOptions.MaxIdle > 0 {
+ queryArgs = append(queryArgs, "MAXIDLE", options.WithCursorOptions.MaxIdle)
+ }
+ }
+ }
+ if options.Params != nil {
+ queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2)
+ for key, value := range options.Params {
+ queryArgs = append(queryArgs, key, value)
+ }
+ }
+
+ if options.DialectVersion > 0 {
+ queryArgs = append(queryArgs, "DIALECT", options.DialectVersion)
+ }
+ }
+ return queryArgs
+}
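+
+// For example (illustrative; field and alias names are hypothetical), one
+// GROUPBY on @brand with a COUNT reducer aliased as "count" yields:
+//
+//	<query> GROUPBY 1 @brand REDUCE COUNT 0 AS count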
+
+func ProcessAggregateResult(data []interface{}) (*FTAggregateResult, error) {
+ if len(data) == 0 {
+ return nil, fmt.Errorf("no data returned")
+ }
+
+ total, ok := data[0].(int64)
+ if !ok {
+ return nil, fmt.Errorf("invalid total format")
+ }
+
+ rows := make([]AggregateRow, 0, len(data)-1)
+ for _, row := range data[1:] {
+ fields, ok := row.([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("invalid row format")
+ }
+
+ rowMap := make(map[string]interface{})
+ for i := 0; i < len(fields); i += 2 {
+ key, ok := fields[i].(string)
+ if !ok {
+ return nil, fmt.Errorf("invalid field key format")
+ }
+ value := fields[i+1]
+ rowMap[key] = value
+ }
+ rows = append(rows, AggregateRow{Fields: rowMap})
+ }
+
+ result := &FTAggregateResult{
+ Total: int(total),
+ Rows: rows,
+ }
+ return result, nil
+}
+
+func NewAggregateCmd(ctx context.Context, args ...interface{}) *AggregateCmd {
+ return &AggregateCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *AggregateCmd) SetVal(val *FTAggregateResult) {
+ cmd.val = val
+}
+
+func (cmd *AggregateCmd) Val() *FTAggregateResult {
+ return cmd.val
+}
+
+func (cmd *AggregateCmd) Result() (*FTAggregateResult, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *AggregateCmd) RawVal() interface{} {
+ return cmd.rawVal
+}
+
+func (cmd *AggregateCmd) RawResult() (interface{}, error) {
+ return cmd.rawVal, cmd.err
+}
+
+func (cmd *AggregateCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *AggregateCmd) readReply(rd *proto.Reader) (err error) {
+ data, err := rd.ReadSlice()
+ if err != nil {
+ return err
+ }
+ cmd.val, err = ProcessAggregateResult(data)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// FTAggregateWithArgs - Performs a search query on an index and applies a series of aggregate transformations to the result.
+// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query.
+// This function also allows for specifying additional options such as: Verbatim, LoadAll, Load, Timeout, GroupBy, SortBy, SortByMax, Apply, LimitOffset, Limit, Filter, WithCursor, Params, and DialectVersion.
+// For more information, please refer to the Redis documentation:
+// [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/)
+func (c cmdable) FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd {
+ args := []interface{}{"FT.AGGREGATE", index, query}
+ if options != nil {
+ if options.Verbatim {
+ args = append(args, "VERBATIM")
+ }
+ if options.Scorer != "" {
+ args = append(args, "SCORER", options.Scorer)
+ }
+ if options.AddScores {
+ args = append(args, "ADDSCORES")
+ }
+ if options.LoadAll && options.Load != nil {
+ panic("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive")
+ }
+ if options.LoadAll {
+ args = append(args, "LOAD", "*")
+ }
+ if options.Load != nil {
+ args = append(args, "LOAD", len(options.Load))
+ index, count := len(args)-1, 0
+ for _, load := range options.Load {
+ args = append(args, load.Field)
+ count++
+ if load.As != "" {
+ args = append(args, "AS", load.As)
+ count += 2
+ }
+ }
+ args[index] = count
+ }
+ if options.Timeout > 0 {
+ args = append(args, "TIMEOUT", options.Timeout)
+ }
+ for _, apply := range options.Apply {
+ args = append(args, "APPLY", apply.Field)
+ if apply.As != "" {
+ args = append(args, "AS", apply.As)
+ }
+ }
+ if options.GroupBy != nil {
+ for _, groupBy := range options.GroupBy {
+ args = append(args, "GROUPBY", len(groupBy.Fields))
+ args = append(args, groupBy.Fields...)
+
+ for _, reducer := range groupBy.Reduce {
+ args = append(args, "REDUCE")
+ args = append(args, reducer.Reducer.String())
+ if reducer.Args != nil {
+ args = append(args, len(reducer.Args))
+ args = append(args, reducer.Args...)
+ } else {
+ args = append(args, 0)
+ }
+ if reducer.As != "" {
+ args = append(args, "AS", reducer.As)
+ }
+ }
+ }
+ }
+ if options.SortBy != nil {
+ args = append(args, "SORTBY")
+ sortByOptions := []interface{}{}
+ for _, sortBy := range options.SortBy {
+ sortByOptions = append(sortByOptions, sortBy.FieldName)
+ if sortBy.Asc && sortBy.Desc {
+ panic("FT.AGGREGATE: ASC and DESC are mutually exclusive")
+ }
+ if sortBy.Asc {
+ sortByOptions = append(sortByOptions, "ASC")
+ }
+ if sortBy.Desc {
+ sortByOptions = append(sortByOptions, "DESC")
+ }
+ }
+ args = append(args, len(sortByOptions))
+ args = append(args, sortByOptions...)
+ }
+ if options.SortByMax > 0 {
+ args = append(args, "MAX", options.SortByMax)
+ }
+ if options.LimitOffset >= 0 && options.Limit > 0 {
+ args = append(args, "LIMIT", options.LimitOffset, options.Limit)
+ }
+ if options.Filter != "" {
+ args = append(args, "FILTER", options.Filter)
+ }
+ if options.WithCursor {
+ args = append(args, "WITHCURSOR")
+ if options.WithCursorOptions != nil {
+ if options.WithCursorOptions.Count > 0 {
+ args = append(args, "COUNT", options.WithCursorOptions.Count)
+ }
+ if options.WithCursorOptions.MaxIdle > 0 {
+ args = append(args, "MAXIDLE", options.WithCursorOptions.MaxIdle)
+ }
+ }
+ }
+ if options.Params != nil {
+ args = append(args, "PARAMS", len(options.Params)*2)
+ for key, value := range options.Params {
+ args = append(args, key, value)
+ }
+ }
+ if options.DialectVersion > 0 {
+ args = append(args, "DIALECT", options.DialectVersion)
+ }
+ }
+
+ cmd := NewAggregateCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTAliasAdd - Adds an alias to an index.
+// The 'index' parameter specifies the index to which the alias is added, and the 'alias' parameter specifies the alias.
+// For more information, please refer to the Redis documentation:
+// [FT.ALIASADD]: (https://redis.io/commands/ft.aliasadd/)
+func (c cmdable) FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd {
+ args := []interface{}{"FT.ALIASADD", alias, index}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTAliasDel - Removes an alias from an index.
+// The 'alias' parameter specifies the alias to be removed.
+// For more information, please refer to the Redis documentation:
+// [FT.ALIASDEL]: (https://redis.io/commands/ft.aliasdel/)
+func (c cmdable) FTAliasDel(ctx context.Context, alias string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "FT.ALIASDEL", alias)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTAliasUpdate - Updates an alias to an index.
+// The 'index' parameter specifies the index to which the alias is updated, and the 'alias' parameter specifies the alias.
+// If the alias already exists for a different index, it updates the alias to point to the specified index instead.
+// For more information, please refer to the Redis documentation:
+// [FT.ALIASUPDATE]: (https://redis.io/commands/ft.aliasupdate/)
+func (c cmdable) FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "FT.ALIASUPDATE", alias, index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTAlter - Alters the definition of an existing index.
+// The 'index' parameter specifies the index to alter, and the 'skipInitialScan' parameter specifies whether to skip the initial scan.
+// The 'definition' parameter specifies the new definition for the index.
+// For more information, please refer to the Redis documentation:
+// [FT.ALTER]: (https://redis.io/commands/ft.alter/)
+func (c cmdable) FTAlter(ctx context.Context, index string, skipInitialScan bool, definition []interface{}) *StatusCmd {
+ args := []interface{}{"FT.ALTER", index}
+ if skipInitialScan {
+ args = append(args, "SKIPINITIALSCAN")
+ }
+ args = append(args, "SCHEMA", "ADD")
+ args = append(args, definition...)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTConfigGet - Retrieves the value of a RediSearch configuration parameter.
+// The 'option' parameter specifies the configuration parameter to retrieve.
+// For more information, please refer to the Redis documentation:
+// [FT.CONFIG GET]: (https://redis.io/commands/ft.config-get/)
+func (c cmdable) FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd {
+ cmd := NewMapMapStringInterfaceCmd(ctx, "FT.CONFIG", "GET", option)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTConfigSet - Sets the value of a RediSearch configuration parameter.
+// The 'option' parameter specifies the configuration parameter to set, and the 'value' parameter specifies the new value.
+// For more information, please refer to the Redis documentation:
+// [FT.CONFIG SET]: (https://redis.io/commands/ft.config-set/)
+func (c cmdable) FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "FT.CONFIG", "SET", option, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTCreate - Creates a new index with the given options and schema.
+// The 'index' parameter specifies the name of the index to create.
+// The 'options' parameter specifies various options for the index, such as:
+// whether to index hashes or JSONs, prefixes, filters, default language, score, score field, payload field, etc.
+// The 'schema' parameter specifies the schema for the index, which includes the field name, field type, etc.
+// For more information, please refer to the Redis documentation:
+// [FT.CREATE]: (https://redis.io/commands/ft.create/)
+func (c cmdable) FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd {
+ args := []interface{}{"FT.CREATE", index}
+ if options != nil {
+ if options.OnHash && !options.OnJSON {
+ args = append(args, "ON", "HASH")
+ }
+ if options.OnJSON && !options.OnHash {
+ args = append(args, "ON", "JSON")
+ }
+ if options.OnHash && options.OnJSON {
+ panic("FT.CREATE: ON HASH and ON JSON are mutually exclusive")
+ }
+ if options.Prefix != nil {
+ args = append(args, "PREFIX", len(options.Prefix))
+ args = append(args, options.Prefix...)
+ }
+ if options.Filter != "" {
+ args = append(args, "FILTER", options.Filter)
+ }
+ if options.DefaultLanguage != "" {
+ args = append(args, "LANGUAGE", options.DefaultLanguage)
+ }
+ if options.LanguageField != "" {
+ args = append(args, "LANGUAGE_FIELD", options.LanguageField)
+ }
+ if options.Score > 0 {
+ args = append(args, "SCORE", options.Score)
+ }
+ if options.ScoreField != "" {
+ args = append(args, "SCORE_FIELD", options.ScoreField)
+ }
+ if options.PayloadField != "" {
+ args = append(args, "PAYLOAD_FIELD", options.PayloadField)
+ }
+ if options.MaxTextFields > 0 {
+ args = append(args, "MAXTEXTFIELDS", options.MaxTextFields)
+ }
+ if options.NoOffsets {
+ args = append(args, "NOOFFSETS")
+ }
+ if options.Temporary > 0 {
+ args = append(args, "TEMPORARY", options.Temporary)
+ }
+ if options.NoHL {
+ args = append(args, "NOHL")
+ }
+ if options.NoFields {
+ args = append(args, "NOFIELDS")
+ }
+ if options.NoFreqs {
+ args = append(args, "NOFREQS")
+ }
+ if options.StopWords != nil {
+ args = append(args, "STOPWORDS", len(options.StopWords))
+ args = append(args, options.StopWords...)
+ }
+ if options.SkipInitialScan {
+ args = append(args, "SKIPINITIALSCAN")
+ }
+ }
+ if schema == nil {
+ panic("FT.CREATE: SCHEMA is required")
+ }
+ args = append(args, "SCHEMA")
+ for _, schema := range schema {
+ if schema.FieldName == "" || schema.FieldType == SearchFieldTypeInvalid {
+ panic("FT.CREATE: SCHEMA FieldName and FieldType are required")
+ }
+ args = append(args, schema.FieldName)
+ if schema.As != "" {
+ args = append(args, "AS", schema.As)
+ }
+ args = append(args, schema.FieldType.String())
+ if schema.VectorArgs != nil {
+ if schema.FieldType != SearchFieldTypeVector {
+ panic("FT.CREATE: SCHEMA FieldType VECTOR is required for VectorArgs")
+ }
+ if schema.VectorArgs.FlatOptions != nil && schema.VectorArgs.HNSWOptions != nil {
+ panic("FT.CREATE: SCHEMA VectorArgs FlatOptions and HNSWOptions are mutually exclusive")
+ }
+ if schema.VectorArgs.FlatOptions != nil {
+ args = append(args, "FLAT")
+ if schema.VectorArgs.FlatOptions.Type == "" || schema.VectorArgs.FlatOptions.Dim == 0 || schema.VectorArgs.FlatOptions.DistanceMetric == "" {
+ panic("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR FLAT")
+ }
+ flatArgs := []interface{}{
+ "TYPE", schema.VectorArgs.FlatOptions.Type,
+ "DIM", schema.VectorArgs.FlatOptions.Dim,
+ "DISTANCE_METRIC", schema.VectorArgs.FlatOptions.DistanceMetric,
+ }
+ if schema.VectorArgs.FlatOptions.InitialCapacity > 0 {
+ flatArgs = append(flatArgs, "INITIAL_CAP", schema.VectorArgs.FlatOptions.InitialCapacity)
+ }
+ if schema.VectorArgs.FlatOptions.BlockSize > 0 {
+ flatArgs = append(flatArgs, "BLOCK_SIZE", schema.VectorArgs.FlatOptions.BlockSize)
+ }
+ args = append(args, len(flatArgs))
+ args = append(args, flatArgs...)
+ }
+ if schema.VectorArgs.HNSWOptions != nil {
+ args = append(args, "HNSW")
+ if schema.VectorArgs.HNSWOptions.Type == "" || schema.VectorArgs.HNSWOptions.Dim == 0 || schema.VectorArgs.HNSWOptions.DistanceMetric == "" {
+ panic("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR HNSW")
+ }
+ hnswArgs := []interface{}{
+ "TYPE", schema.VectorArgs.HNSWOptions.Type,
+ "DIM", schema.VectorArgs.HNSWOptions.Dim,
+ "DISTANCE_METRIC", schema.VectorArgs.HNSWOptions.DistanceMetric,
+ }
+ if schema.VectorArgs.HNSWOptions.InitialCapacity > 0 {
+ hnswArgs = append(hnswArgs, "INITIAL_CAP", schema.VectorArgs.HNSWOptions.InitialCapacity)
+ }
+ if schema.VectorArgs.HNSWOptions.MaxEdgesPerNode > 0 {
+ hnswArgs = append(hnswArgs, "M", schema.VectorArgs.HNSWOptions.MaxEdgesPerNode)
+ }
+ if schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode > 0 {
+ hnswArgs = append(hnswArgs, "EF_CONSTRUCTION", schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode)
+ }
+ if schema.VectorArgs.HNSWOptions.EFRunTime > 0 {
+ hnswArgs = append(hnswArgs, "EF_RUNTIME", schema.VectorArgs.HNSWOptions.EFRunTime)
+ }
+ if schema.VectorArgs.HNSWOptions.Epsilon > 0 {
+ hnswArgs = append(hnswArgs, "EPSILON", schema.VectorArgs.HNSWOptions.Epsilon)
+ }
+ args = append(args, len(hnswArgs))
+ args = append(args, hnswArgs...)
+ }
+ }
+ if schema.GeoShapeFieldType != "" {
+ if schema.FieldType != SearchFieldTypeGeoShape {
+ panic("FT.CREATE: SCHEMA FieldType GEOSHAPE is required for GeoShapeFieldType")
+ }
+ args = append(args, schema.GeoShapeFieldType)
+ }
+ if schema.NoStem {
+ args = append(args, "NOSTEM")
+ }
+ if schema.Sortable {
+ args = append(args, "SORTABLE")
+ }
+ if schema.UNF {
+ args = append(args, "UNF")
+ }
+ if schema.NoIndex {
+ args = append(args, "NOINDEX")
+ }
+ if schema.PhoneticMatcher != "" {
+ args = append(args, "PHONETIC", schema.PhoneticMatcher)
+ }
+ if schema.Weight > 0 {
+ args = append(args, "WEIGHT", schema.Weight)
+ }
+ if schema.Separator != "" {
+ args = append(args, "SEPARATOR", schema.Separator)
+ }
+ if schema.CaseSensitive {
+ args = append(args, "CASESENSITIVE")
+ }
+ if schema.WithSuffixtrie {
+ args = append(args, "WITHSUFFIXTRIE")
+ }
+ if schema.IndexEmpty {
+ args = append(args, "INDEXEMPTY")
+ }
+ if schema.IndexMissing {
+ args = append(args, "INDEXMISSING")
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
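+
+// A sketch of defining an index with FTCreate. Values are illustrative, and
+// SearchFieldTypeText / SearchFieldTypeNumeric are assumed to be among the
+// SearchFieldType constants defined alongside SearchFieldTypeInvalid:
+//
+//	opts := &FTCreateOptions{OnHash: true, Prefix: []interface{}{"doc:"}}
+//	schema := []*FieldSchema{
+//		{FieldName: "title", FieldType: SearchFieldTypeText, Weight: 5},
+//		{FieldName: "price", FieldType: SearchFieldTypeNumeric, Sortable: true},
+//	}
+//	err := rdb.FTCreate(ctx, "idx:doc", opts, schema...).Err()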
+
+// FTCursorDel - Deletes a cursor from an existing index.
+// The 'index' parameter specifies the index from which to delete the cursor, and the 'cursorId' parameter specifies the ID of the cursor to delete.
+// For more information, please refer to the Redis documentation:
+// [FT.CURSOR DEL]: (https://redis.io/commands/ft.cursor-del/)
+func (c cmdable) FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "FT.CURSOR", "DEL", index, cursorId)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTCursorRead - Reads the next results from an existing cursor.
+// The 'index' parameter specifies the index from which to read the cursor, the 'cursorId' parameter specifies the ID of the cursor to read, and the 'count' parameter specifies the number of results to read.
+// For more information, please refer to the Redis documentation:
+// [FT.CURSOR READ]: (https://redis.io/commands/ft.cursor-read/)
+func (c cmdable) FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd {
+ args := []interface{}{"FT.CURSOR", "READ", index, cursorId}
+ if count > 0 {
+ args = append(args, "COUNT", count)
+ }
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
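+
+// A sketch of draining a cursor produced by an aggregation started with
+// WITHCURSOR (cursorId comes from that earlier reply; the index name and
+// count are illustrative):
+//
+//	res, err := rdb.FTCursorRead(ctx, "idx:doc", cursorId, 100).Result()
+//	// ... process res, repeat until the reply reports cursor id 0,
+//	// or abandon the cursor early:
+//	_ = rdb.FTCursorDel(ctx, "idx:doc", cursorId)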
+
+// FTDictAdd - Adds terms to a dictionary.
+// The 'dict' parameter specifies the dictionary to which to add the terms, and the 'term' parameter specifies the terms to add.
+// For more information, please refer to the Redis documentation:
+// [FT.DICTADD]: (https://redis.io/commands/ft.dictadd/)
+func (c cmdable) FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd {
+ args := []interface{}{"FT.DICTADD", dict}
+ args = append(args, term...)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTDictDel - Deletes terms from a dictionary.
+// The 'dict' parameter specifies the dictionary from which to delete the terms, and the 'term' parameter specifies the terms to delete.
+// For more information, please refer to the Redis documentation:
+// [FT.DICTDEL]: (https://redis.io/commands/ft.dictdel/)
+func (c cmdable) FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd {
+ args := []interface{}{"FT.DICTDEL", dict}
+ args = append(args, term...)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTDictDump - Returns all terms in the specified dictionary.
+// The 'dict' parameter specifies the dictionary from which to return the terms.
+// For more information, please refer to the Redis documentation:
+// [FT.DICTDUMP]: (https://redis.io/commands/ft.dictdump/)
+func (c cmdable) FTDictDump(ctx context.Context, dict string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "FT.DICTDUMP", dict)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTDropIndex - Deletes an index.
+// The 'index' parameter specifies the index to delete.
+// For more information, please refer to the Redis documentation:
+// [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/)
+func (c cmdable) FTDropIndex(ctx context.Context, index string) *StatusCmd {
+ args := []interface{}{"FT.DROPINDEX", index}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTDropIndexWithArgs - Deletes an index with options.
+// The 'index' parameter specifies the index to delete, and the 'options' parameter specifies the DeleteDocs option for docs deletion.
+// For more information, please refer to the Redis documentation:
+// [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/)
+func (c cmdable) FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd {
+ args := []interface{}{"FT.DROPINDEX", index}
+ if options != nil {
+ if options.DeleteDocs {
+ args = append(args, "DD")
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
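+
+// A short sketch contrasting the two drop variants; with DeleteDocs the
+// server also deletes the indexed documents themselves:
+//
+//	_ = rdb.FTDropIndex(ctx, "idx:doc")
+//	_ = rdb.FTDropIndexWithArgs(ctx, "idx:doc", &FTDropIndexOptions{DeleteDocs: true})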
+
+// FTExplain - Returns the execution plan for a complex query.
+// The 'index' parameter specifies the index to query, and the 'query' parameter specifies the query string.
+// For more information, please refer to the Redis documentation:
+// [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/)
+func (c cmdable) FTExplain(ctx context.Context, index string, query string) *StringCmd {
+ cmd := NewStringCmd(ctx, "FT.EXPLAIN", index, query)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTExplainWithArgs - Returns the execution plan for a complex query with options.
+// The 'index' parameter specifies the index to query, the 'query' parameter specifies the query string, and the 'options' parameter specifies the Dialect for the query.
+// For more information, please refer to the Redis documentation:
+// [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/)
+func (c cmdable) FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd {
+ args := []interface{}{"FT.EXPLAIN", index, query}
+ if options.Dialect != "" {
+ args = append(args, "DIALECT", options.Dialect)
+ }
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTExplainCli - Returns the execution plan for a complex query. [Not Implemented]
+// For more information, see https://redis.io/commands/ft.explaincli/
+func (c cmdable) FTExplainCli(ctx context.Context, key, path string) error {
+ panic("not implemented")
+}
+
+func parseFTInfo(data map[string]interface{}) (FTInfoResult, error) {
+ var ftInfo FTInfoResult
+ // Manually parse each field from the map
+ if indexErrors, ok := data["Index Errors"].([]interface{}); ok {
+ ftInfo.IndexErrors = IndexErrors{
+ IndexingFailures: internal.ToInteger(indexErrors[1]),
+ LastIndexingError: internal.ToString(indexErrors[3]),
+ LastIndexingErrorKey: internal.ToString(indexErrors[5]),
+ }
+ }
+
+ if attributes, ok := data["attributes"].([]interface{}); ok {
+ for _, attr := range attributes {
+ if attrMap, ok := attr.([]interface{}); ok {
+ att := FTAttribute{}
+ for i := 0; i < len(attrMap); i++ {
+ if internal.ToLower(internal.ToString(attrMap[i])) == "attribute" {
+ att.Attribute = internal.ToString(attrMap[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "identifier" {
+ att.Identifier = internal.ToString(attrMap[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "type" {
+ att.Type = internal.ToString(attrMap[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "weight" {
+ att.Weight = internal.ToFloat(attrMap[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "nostem" {
+ att.NoStem = true
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "sortable" {
+ att.Sortable = true
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "noindex" {
+ att.NoIndex = true
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "unf" {
+ att.UNF = true
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "phonetic" {
+ att.PhoneticMatcher = internal.ToString(attrMap[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "case_sensitive" {
+ att.CaseSensitive = true
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "withsuffixtrie" {
+ att.WithSuffixtrie = true
+ continue
+ }
+ }
+ ftInfo.Attributes = append(ftInfo.Attributes, att)
+ }
+ }
+ }
+
+ ftInfo.BytesPerRecordAvg = internal.ToString(data["bytes_per_record_avg"])
+ ftInfo.Cleaning = internal.ToInteger(data["cleaning"])
+
+ if cursorStats, ok := data["cursor_stats"].([]interface{}); ok {
+ ftInfo.CursorStats = CursorStats{
+ GlobalIdle: internal.ToInteger(cursorStats[1]),
+ GlobalTotal: internal.ToInteger(cursorStats[3]),
+ IndexCapacity: internal.ToInteger(cursorStats[5]),
+ IndexTotal: internal.ToInteger(cursorStats[7]),
+ }
+ }
+
+ if dialectStats, ok := data["dialect_stats"].([]interface{}); ok {
+ ftInfo.DialectStats = make(map[string]int)
+ for i := 0; i < len(dialectStats); i += 2 {
+ ftInfo.DialectStats[internal.ToString(dialectStats[i])] = internal.ToInteger(dialectStats[i+1])
+ }
+ }
+
+ ftInfo.DocTableSizeMB = internal.ToFloat(data["doc_table_size_mb"])
+
+ if fieldStats, ok := data["field statistics"].([]interface{}); ok {
+ for _, stat := range fieldStats {
+ if statMap, ok := stat.([]interface{}); ok {
+ ftInfo.FieldStatistics = append(ftInfo.FieldStatistics, FieldStatistic{
+ Identifier: internal.ToString(statMap[1]),
+ Attribute: internal.ToString(statMap[3]),
+ IndexErrors: IndexErrors{
+ IndexingFailures: internal.ToInteger(statMap[5].([]interface{})[1]),
+ LastIndexingError: internal.ToString(statMap[5].([]interface{})[3]),
+ LastIndexingErrorKey: internal.ToString(statMap[5].([]interface{})[5]),
+ },
+ })
+ }
+ }
+ }
+
+ if gcStats, ok := data["gc_stats"].([]interface{}); ok {
+ ftInfo.GCStats = GCStats{}
+ for i := 0; i < len(gcStats); i += 2 {
+ if internal.ToLower(internal.ToString(gcStats[i])) == "bytes_collected" {
+ ftInfo.GCStats.BytesCollected = internal.ToInteger(gcStats[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(gcStats[i])) == "total_ms_run" {
+ ftInfo.GCStats.TotalMsRun = internal.ToInteger(gcStats[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(gcStats[i])) == "total_cycles" {
+ ftInfo.GCStats.TotalCycles = internal.ToInteger(gcStats[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(gcStats[i])) == "average_cycle_time_ms" {
+ ftInfo.GCStats.AverageCycleTimeMs = internal.ToString(gcStats[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(gcStats[i])) == "last_run_time_ms" {
+ ftInfo.GCStats.LastRunTimeMs = internal.ToInteger(gcStats[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(gcStats[i])) == "gc_numeric_trees_missed" {
+ ftInfo.GCStats.GCNumericTreesMissed = internal.ToInteger(gcStats[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(gcStats[i])) == "gc_blocks_denied" {
+ ftInfo.GCStats.GCBlocksDenied = internal.ToInteger(gcStats[i+1])
+ continue
+ }
+ }
+ }
+
+ ftInfo.GeoshapesSzMB = internal.ToFloat(data["geoshapes_sz_mb"])
+ ftInfo.HashIndexingFailures = internal.ToInteger(data["hash_indexing_failures"])
+
+ if indexDef, ok := data["index_definition"].([]interface{}); ok {
+ ftInfo.IndexDefinition = IndexDefinition{
+ KeyType: internal.ToString(indexDef[1]),
+ Prefixes: internal.ToStringSlice(indexDef[3]),
+ DefaultScore: internal.ToFloat(indexDef[5]),
+ }
+ }
+
+ ftInfo.IndexName = internal.ToString(data["index_name"])
+ ftInfo.IndexOptions = internal.ToStringSlice(data["index_options"].([]interface{}))
+ ftInfo.Indexing = internal.ToInteger(data["indexing"])
+ ftInfo.InvertedSzMB = internal.ToFloat(data["inverted_sz_mb"])
+ ftInfo.KeyTableSizeMB = internal.ToFloat(data["key_table_size_mb"])
+ ftInfo.MaxDocID = internal.ToInteger(data["max_doc_id"])
+ ftInfo.NumDocs = internal.ToInteger(data["num_docs"])
+ ftInfo.NumRecords = internal.ToInteger(data["num_records"])
+ ftInfo.NumTerms = internal.ToInteger(data["num_terms"])
+ ftInfo.NumberOfUses = internal.ToInteger(data["number_of_uses"])
+ ftInfo.OffsetBitsPerRecordAvg = internal.ToString(data["offset_bits_per_record_avg"])
+ ftInfo.OffsetVectorsSzMB = internal.ToFloat(data["offset_vectors_sz_mb"])
+ ftInfo.OffsetsPerTermAvg = internal.ToString(data["offsets_per_term_avg"])
+ ftInfo.PercentIndexed = internal.ToFloat(data["percent_indexed"])
+ ftInfo.RecordsPerDocAvg = internal.ToString(data["records_per_doc_avg"])
+ ftInfo.SortableValuesSizeMB = internal.ToFloat(data["sortable_values_size_mb"])
+ ftInfo.TagOverheadSzMB = internal.ToFloat(data["tag_overhead_sz_mb"])
+ ftInfo.TextOverheadSzMB = internal.ToFloat(data["text_overhead_sz_mb"])
+ ftInfo.TotalIndexMemorySzMB = internal.ToFloat(data["total_index_memory_sz_mb"])
+ ftInfo.TotalIndexingTime = internal.ToInteger(data["total_indexing_time"])
+ ftInfo.TotalInvertedIndexBlocks = internal.ToInteger(data["total_inverted_index_blocks"])
+ ftInfo.VectorIndexSzMB = internal.ToFloat(data["vector_index_sz_mb"])
+
+ return ftInfo, nil
+}
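+
+// Note: the positional reads above (indexes 1, 3, 5, ... of each nested
+// sub-array) assume the server returns those replies as name/value pairs in
+// a fixed order; parseFTInfo does not fall back to name-based lookup for them.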
+
+type FTInfoCmd struct {
+ baseCmd
+ val FTInfoResult
+}
+
+func newFTInfoCmd(ctx context.Context, args ...interface{}) *FTInfoCmd {
+ return &FTInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FTInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FTInfoCmd) SetVal(val FTInfoResult) {
+ cmd.val = val
+}
+
+func (cmd *FTInfoCmd) Result() (FTInfoResult, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FTInfoCmd) Val() FTInfoResult {
+ return cmd.val
+}
+
+func (cmd *FTInfoCmd) RawVal() interface{} {
+ return cmd.rawVal
+}
+
+func (cmd *FTInfoCmd) RawResult() (interface{}, error) {
+ return cmd.rawVal, cmd.err
+}
+
+func (cmd *FTInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ data := make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ v, err := rd.ReadReply()
+ if err != nil {
+ if err == Nil {
+ data[k] = Nil
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ data[k] = err
+ continue
+ }
+ return err
+ }
+ data[k] = v
+ }
+ cmd.val, err = parseFTInfo(data)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// FTInfo - Retrieves information about an index.
+// The 'index' parameter specifies the index to retrieve information about.
+// For more information, please refer to the Redis documentation:
+// [FT.INFO]: (https://redis.io/commands/ft.info/)
+func (c cmdable) FTInfo(ctx context.Context, index string) *FTInfoCmd {
+ cmd := newFTInfoCmd(ctx, "FT.INFO", index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTSpellCheck - Checks a query string for spelling errors.
+// For more details about spellcheck query please follow:
+// https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/
+// For more information, please refer to the Redis documentation:
+// [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/)
+func (c cmdable) FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd {
+ args := []interface{}{"FT.SPELLCHECK", index, query}
+ cmd := newFTSpellCheckCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTSpellCheckWithArgs - Checks a query string for spelling errors with additional options.
+// For more details about spellcheck query please follow:
+// https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/
+// For more information, please refer to the Redis documentation:
+// [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/)
+func (c cmdable) FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd {
+ args := []interface{}{"FT.SPELLCHECK", index, query}
+ if options != nil {
+ if options.Distance > 0 {
+ args = append(args, "DISTANCE", options.Distance)
+ }
+ if options.Terms != nil {
+ args = append(args, "TERMS", options.Terms.Inclusion, options.Terms.Dictionary)
+ args = append(args, options.Terms.Terms...)
+ }
+ if options.Dialect > 0 {
+ args = append(args, "DIALECT", options.Dialect)
+ }
+ }
+ cmd := newFTSpellCheckCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
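+
+// A usage sketch with a custom Levenshtein distance and an exclusion
+// dictionary. The dictionary name is illustrative, and the struct type of
+// the Terms field (FTSpellCheckTerms here) is assumed from its Inclusion,
+// Dictionary and Terms fields used above:
+//
+//	opts := &FTSpellCheckOptions{
+//		Distance: 2,
+//		Terms:    &FTSpellCheckTerms{Inclusion: "EXCLUDE", Dictionary: "stopdict"},
+//	}
+//	res, err := rdb.FTSpellCheckWithArgs(ctx, "idx:doc", "hllo wrld", opts).Result()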
+
+type FTSpellCheckCmd struct {
+ baseCmd
+ val []SpellCheckResult
+}
+
+func newFTSpellCheckCmd(ctx context.Context, args ...interface{}) *FTSpellCheckCmd {
+ return &FTSpellCheckCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FTSpellCheckCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FTSpellCheckCmd) SetVal(val []SpellCheckResult) {
+ cmd.val = val
+}
+
+func (cmd *FTSpellCheckCmd) Result() ([]SpellCheckResult, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FTSpellCheckCmd) Val() []SpellCheckResult {
+ return cmd.val
+}
+
+func (cmd *FTSpellCheckCmd) RawVal() interface{} {
+ return cmd.rawVal
+}
+
+func (cmd *FTSpellCheckCmd) RawResult() (interface{}, error) {
+ return cmd.rawVal, cmd.err
+}
+
+func (cmd *FTSpellCheckCmd) readReply(rd *proto.Reader) (err error) {
+ data, err := rd.ReadSlice()
+ if err != nil {
+ return err
+ }
+ cmd.val, err = parseFTSpellCheck(data)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func parseFTSpellCheck(data []interface{}) ([]SpellCheckResult, error) {
+ results := make([]SpellCheckResult, 0, len(data))
+
+ for _, termData := range data {
+ termInfo, ok := termData.([]interface{})
+ if !ok || len(termInfo) != 3 {
+ return nil, fmt.Errorf("invalid term format")
+ }
+
+ term, ok := termInfo[1].(string)
+ if !ok {
+ return nil, fmt.Errorf("invalid term format")
+ }
+
+ suggestionsData, ok := termInfo[2].([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("invalid suggestions format")
+ }
+
+ suggestions := make([]SpellCheckSuggestion, 0, len(suggestionsData))
+ for _, suggestionData := range suggestionsData {
+ suggestionInfo, ok := suggestionData.([]interface{})
+ if !ok || len(suggestionInfo) != 2 {
+ return nil, fmt.Errorf("invalid suggestion format")
+ }
+
+ scoreStr, ok := suggestionInfo[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("invalid suggestion score format")
+ }
+ score, err := strconv.ParseFloat(scoreStr, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid suggestion score value")
+ }
+
+ suggestion, ok := suggestionInfo[1].(string)
+ if !ok {
+ return nil, fmt.Errorf("invalid suggestion format")
+ }
+
+ suggestions = append(suggestions, SpellCheckSuggestion{
+ Score: score,
+ Suggestion: suggestion,
+ })
+ }
+
+ results = append(results, SpellCheckResult{
+ Term: term,
+ Suggestions: suggestions,
+ })
+ }
+
+ return results, nil
+}
+
+func parseFTSearch(data []interface{}, noContent, withScores, withPayloads, withSortKeys bool) (FTSearchResult, error) {
+ if len(data) < 1 {
+ return FTSearchResult{}, fmt.Errorf("unexpected search result format")
+ }
+
+ total, ok := data[0].(int64)
+ if !ok {
+ return FTSearchResult{}, fmt.Errorf("invalid total results format")
+ }
+
+ var results []Document
+ for i := 1; i < len(data); {
+ docID, ok := data[i].(string)
+ if !ok {
+ return FTSearchResult{}, fmt.Errorf("invalid document ID format")
+ }
+
+ doc := Document{
+ ID: docID,
+ Fields: make(map[string]string),
+ }
+ i++
+
+ if noContent {
+ results = append(results, doc)
+ continue
+ }
+
+ if withScores && i < len(data) {
+ if scoreStr, ok := data[i].(string); ok {
+ score, err := strconv.ParseFloat(scoreStr, 64)
+ if err != nil {
+ return FTSearchResult{}, fmt.Errorf("invalid score format")
+ }
+ doc.Score = &score
+ i++
+ }
+ }
+
+ if withPayloads && i < len(data) {
+ if payload, ok := data[i].(string); ok {
+ doc.Payload = &payload
+ i++
+ }
+ }
+
+ if withSortKeys && i < len(data) {
+ if sortKey, ok := data[i].(string); ok {
+ doc.SortKey = &sortKey
+ i++
+ }
+ }
+
+ if i < len(data) {
+ fields, ok := data[i].([]interface{})
+ if !ok {
+ return FTSearchResult{}, fmt.Errorf("invalid document fields format")
+ }
+
+ for j := 0; j < len(fields); j += 2 {
+ key, ok := fields[j].(string)
+ if !ok {
+ return FTSearchResult{}, fmt.Errorf("invalid field key format")
+ }
+ value, ok := fields[j+1].(string)
+ if !ok {
+ return FTSearchResult{}, fmt.Errorf("invalid field value format")
+ }
+ doc.Fields[key] = value
+ }
+ i++
+ }
+
+ results = append(results, doc)
+ }
+ return FTSearchResult{
+ Total: int(total),
+ Docs: results,
+ }, nil
+}
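+
+// For reference, parseFTSearch consumes a flat array shaped like the
+// following (illustrative, with WITHSCORES enabled): the total count first,
+// then per document an id, the optional score/payload/sort key, and the
+// field-value pair array:
+//
+//	[2, "doc:1", "1.5", ["title", "hello"], "doc:2", "0.7", ["title", "world"]]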
+
+type FTSearchCmd struct {
+ baseCmd
+ val FTSearchResult
+ options *FTSearchOptions
+}
+
+func newFTSearchCmd(ctx context.Context, options *FTSearchOptions, args ...interface{}) *FTSearchCmd {
+ return &FTSearchCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ options: options,
+ }
+}
+
+func (cmd *FTSearchCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FTSearchCmd) SetVal(val FTSearchResult) {
+ cmd.val = val
+}
+
+func (cmd *FTSearchCmd) Result() (FTSearchResult, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FTSearchCmd) Val() FTSearchResult {
+ return cmd.val
+}
+
+func (cmd *FTSearchCmd) RawVal() interface{} {
+ return cmd.rawVal
+}
+
+func (cmd *FTSearchCmd) RawResult() (interface{}, error) {
+ return cmd.rawVal, cmd.err
+}
+
+func (cmd *FTSearchCmd) readReply(rd *proto.Reader) (err error) {
+ data, err := rd.ReadSlice()
+ if err != nil {
+ return err
+ }
+ cmd.val, err = parseFTSearch(data, cmd.options.NoContent, cmd.options.WithScores, cmd.options.WithPayloads, cmd.options.WithSortKeys)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// FTSearch - Executes a search query on an index.
+// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query.
+// For more information, please refer to the Redis documentation about [FT.SEARCH].
+//
+// [FT.SEARCH]: (https://redis.io/commands/ft.search/)
+func (c cmdable) FTSearch(ctx context.Context, index string, query string) *FTSearchCmd {
+ args := []interface{}{"FT.SEARCH", index, query}
+ cmd := newFTSearchCmd(ctx, &FTSearchOptions{}, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type SearchQuery []interface{}
+
+// FTSearchQuery - Builds the argument list for a search query with additional
+// options. It does not execute the query itself; the returned SearchQuery is
+// meant to be passed to commands that accept a prepared query.
+// For more information, please refer to the Redis documentation about [FT.SEARCH].
+//
+// [FT.SEARCH]: (https://redis.io/commands/ft.search/)
+func FTSearchQuery(query string, options *FTSearchOptions) SearchQuery {
+ queryArgs := []interface{}{query}
+ if options != nil {
+ if options.NoContent {
+ queryArgs = append(queryArgs, "NOCONTENT")
+ }
+ if options.Verbatim {
+ queryArgs = append(queryArgs, "VERBATIM")
+ }
+ if options.NoStopWords {
+ queryArgs = append(queryArgs, "NOSTOPWORDS")
+ }
+ if options.WithScores {
+ queryArgs = append(queryArgs, "WITHSCORES")
+ }
+ if options.WithPayloads {
+ queryArgs = append(queryArgs, "WITHPAYLOADS")
+ }
+ if options.WithSortKeys {
+ queryArgs = append(queryArgs, "WITHSORTKEYS")
+ }
+ if options.Filters != nil {
+ for _, filter := range options.Filters {
+ queryArgs = append(queryArgs, "FILTER", filter.FieldName, filter.Min, filter.Max)
+ }
+ }
+ if options.GeoFilter != nil {
+ for _, geoFilter := range options.GeoFilter {
+ queryArgs = append(queryArgs, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit)
+ }
+ }
+ if options.InKeys != nil {
+ queryArgs = append(queryArgs, "INKEYS", len(options.InKeys))
+ queryArgs = append(queryArgs, options.InKeys...)
+ }
+ if options.InFields != nil {
+ queryArgs = append(queryArgs, "INFIELDS", len(options.InFields))
+ queryArgs = append(queryArgs, options.InFields...)
+ }
+ if options.Return != nil {
+ queryArgs = append(queryArgs, "RETURN")
+ queryArgsReturn := []interface{}{}
+ for _, ret := range options.Return {
+ queryArgsReturn = append(queryArgsReturn, ret.FieldName)
+ if ret.As != "" {
+ queryArgsReturn = append(queryArgsReturn, "AS", ret.As)
+ }
+ }
+ queryArgs = append(queryArgs, len(queryArgsReturn))
+ queryArgs = append(queryArgs, queryArgsReturn...)
+ }
+ if options.Slop > 0 {
+ queryArgs = append(queryArgs, "SLOP", options.Slop)
+ }
+ if options.Timeout > 0 {
+ queryArgs = append(queryArgs, "TIMEOUT", options.Timeout)
+ }
+ if options.InOrder {
+ queryArgs = append(queryArgs, "INORDER")
+ }
+ if options.Language != "" {
+ queryArgs = append(queryArgs, "LANGUAGE", options.Language)
+ }
+ if options.Expander != "" {
+ queryArgs = append(queryArgs, "EXPANDER", options.Expander)
+ }
+ if options.Scorer != "" {
+ queryArgs = append(queryArgs, "SCORER", options.Scorer)
+ }
+ if options.ExplainScore {
+ queryArgs = append(queryArgs, "EXPLAINSCORE")
+ }
+ if options.Payload != "" {
+ queryArgs = append(queryArgs, "PAYLOAD", options.Payload)
+ }
+ if options.SortBy != nil {
+ queryArgs = append(queryArgs, "SORTBY")
+ for _, sortBy := range options.SortBy {
+ queryArgs = append(queryArgs, sortBy.FieldName)
+ if sortBy.Asc && sortBy.Desc {
+ panic("FT.SEARCH: ASC and DESC are mutually exclusive")
+ }
+ if sortBy.Asc {
+ queryArgs = append(queryArgs, "ASC")
+ }
+ if sortBy.Desc {
+ queryArgs = append(queryArgs, "DESC")
+ }
+ }
+ if options.SortByWithCount {
+ queryArgs = append(queryArgs, "WITHCOUNT")
+ }
+ }
+ if options.LimitOffset >= 0 && options.Limit > 0 {
+ queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit)
+ }
+ if options.Params != nil {
+ queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2)
+ for key, value := range options.Params {
+ queryArgs = append(queryArgs, key, value)
+ }
+ }
+ if options.DialectVersion > 0 {
+ queryArgs = append(queryArgs, "DIALECT", options.DialectVersion)
+ }
+ }
+ return queryArgs
+}
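+
+// FTSearchQuery only prepares arguments. A sketch of what it yields (the
+// result is ready to be appended after "QUERY" in a consuming command such
+// as the FT.PROFILE implementation commented out below):
+//
+//	q := FTSearchQuery("@title:hello", &FTSearchOptions{WithScores: true})
+//	// q == SearchQuery{"@title:hello", "WITHSCORES"}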
+
+// FTSearchWithArgs - Executes a search query on an index with additional options.
+// The 'index' parameter specifies the index to search, the 'query' parameter specifies the search query,
+// and the 'options' parameter specifies additional options for the search.
+// For more information, please refer to the Redis documentation about [FT.SEARCH].
+//
+// [FT.SEARCH]: (https://redis.io/commands/ft.search/)
+func (c cmdable) FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd {
+ args := []interface{}{"FT.SEARCH", index, query}
+ if options != nil {
+ if options.NoContent {
+ args = append(args, "NOCONTENT")
+ }
+ if options.Verbatim {
+ args = append(args, "VERBATIM")
+ }
+ if options.NoStopWords {
+ args = append(args, "NOSTOPWORDS")
+ }
+ if options.WithScores {
+ args = append(args, "WITHSCORES")
+ }
+ if options.WithPayloads {
+ args = append(args, "WITHPAYLOADS")
+ }
+ if options.WithSortKeys {
+ args = append(args, "WITHSORTKEYS")
+ }
+ if options.Filters != nil {
+ for _, filter := range options.Filters {
+ args = append(args, "FILTER", filter.FieldName, filter.Min, filter.Max)
+ }
+ }
+ if options.GeoFilter != nil {
+ for _, geoFilter := range options.GeoFilter {
+ args = append(args, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit)
+ }
+ }
+ if options.InKeys != nil {
+ args = append(args, "INKEYS", len(options.InKeys))
+ args = append(args, options.InKeys...)
+ }
+ if options.InFields != nil {
+ args = append(args, "INFIELDS", len(options.InFields))
+ args = append(args, options.InFields...)
+ }
+ if options.Return != nil {
+ args = append(args, "RETURN")
+ argsReturn := []interface{}{}
+ for _, ret := range options.Return {
+ argsReturn = append(argsReturn, ret.FieldName)
+ if ret.As != "" {
+ argsReturn = append(argsReturn, "AS", ret.As)
+ }
+ }
+ args = append(args, len(argsReturn))
+ args = append(args, argsReturn...)
+ }
+ if options.Slop > 0 {
+ args = append(args, "SLOP", options.Slop)
+ }
+ if options.Timeout > 0 {
+ args = append(args, "TIMEOUT", options.Timeout)
+ }
+ if options.InOrder {
+ args = append(args, "INORDER")
+ }
+ if options.Language != "" {
+ args = append(args, "LANGUAGE", options.Language)
+ }
+ if options.Expander != "" {
+ args = append(args, "EXPANDER", options.Expander)
+ }
+ if options.Scorer != "" {
+ args = append(args, "SCORER", options.Scorer)
+ }
+ if options.ExplainScore {
+ args = append(args, "EXPLAINSCORE")
+ }
+ if options.Payload != "" {
+ args = append(args, "PAYLOAD", options.Payload)
+ }
+ if options.SortBy != nil {
+ args = append(args, "SORTBY")
+ for _, sortBy := range options.SortBy {
+ args = append(args, sortBy.FieldName)
+ if sortBy.Asc && sortBy.Desc {
+ panic("FT.SEARCH: ASC and DESC are mutually exclusive")
+ }
+ if sortBy.Asc {
+ args = append(args, "ASC")
+ }
+ if sortBy.Desc {
+ args = append(args, "DESC")
+ }
+ }
+ if options.SortByWithCount {
+ args = append(args, "WITHCOUNT")
+ }
+ }
+ if options.LimitOffset >= 0 && options.Limit > 0 {
+ args = append(args, "LIMIT", options.LimitOffset, options.Limit)
+ }
+ if options.Params != nil {
+ args = append(args, "PARAMS", len(options.Params)*2)
+ for key, value := range options.Params {
+ args = append(args, key, value)
+ }
+ }
+ if options.DialectVersion > 0 {
+ args = append(args, "DIALECT", options.DialectVersion)
+ }
+	} else {
+		options = &FTSearchOptions{}
+	}
+	cmd := newFTSearchCmd(ctx, options, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
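+
+// A usage sketch with paging and query parameters; Params is iterated as a
+// map above, and DIALECT 2 or higher is what the server requires for
+// parameterized queries:
+//
+//	opts := &FTSearchOptions{
+//		Params:         map[string]interface{}{"min": 10},
+//		DialectVersion: 2,
+//		LimitOffset:    0,
+//		Limit:          20,
+//	}
+//	res, err := rdb.FTSearchWithArgs(ctx, "idx:doc", "@price:[$min +inf]", opts).Result()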
+
+func NewFTSynDumpCmd(ctx context.Context, args ...interface{}) *FTSynDumpCmd {
+ return &FTSynDumpCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FTSynDumpCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FTSynDumpCmd) SetVal(val []FTSynDumpResult) {
+ cmd.val = val
+}
+
+func (cmd *FTSynDumpCmd) Val() []FTSynDumpResult {
+ return cmd.val
+}
+
+func (cmd *FTSynDumpCmd) Result() ([]FTSynDumpResult, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FTSynDumpCmd) RawVal() interface{} {
+ return cmd.rawVal
+}
+
+func (cmd *FTSynDumpCmd) RawResult() (interface{}, error) {
+ return cmd.rawVal, cmd.err
+}
+
+func (cmd *FTSynDumpCmd) readReply(rd *proto.Reader) error {
+ termSynonymPairs, err := rd.ReadSlice()
+ if err != nil {
+ return err
+ }
+
+ var results []FTSynDumpResult
+ for i := 0; i < len(termSynonymPairs); i += 2 {
+ term, ok := termSynonymPairs[i].(string)
+ if !ok {
+ return fmt.Errorf("invalid term format")
+ }
+
+ synonyms, ok := termSynonymPairs[i+1].([]interface{})
+ if !ok {
+ return fmt.Errorf("invalid synonyms format")
+ }
+
+ synonymList := make([]string, len(synonyms))
+ for j, syn := range synonyms {
+ synonym, ok := syn.(string)
+ if !ok {
+ return fmt.Errorf("invalid synonym format")
+ }
+ synonymList[j] = synonym
+ }
+
+ results = append(results, FTSynDumpResult{
+ Term: term,
+ Synonyms: synonymList,
+ })
+ }
+
+ cmd.val = results
+ return nil
+}
+
+// FTSynDump - Dumps the contents of a synonym group.
+// The 'index' parameter specifies the index to dump.
+// For more information, please refer to the Redis documentation:
+// [FT.SYNDUMP]: (https://redis.io/commands/ft.syndump/)
+func (c cmdable) FTSynDump(ctx context.Context, index string) *FTSynDumpCmd {
+ cmd := NewFTSynDumpCmd(ctx, "FT.SYNDUMP", index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTSynUpdate - Creates or updates a synonym group with additional terms.
+// The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, and the 'terms' parameter specifies the additional terms.
+// For more information, please refer to the Redis documentation:
+// [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/)
+func (c cmdable) FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd {
+ args := []interface{}{"FT.SYNUPDATE", index, synGroupId}
+ args = append(args, terms...)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTSynUpdateWithArgs - Creates or updates a synonym group with additional terms and options.
+// The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, the 'options' parameter specifies additional options for the update, and the 'terms' parameter specifies the additional terms.
+// For more information, please refer to the Redis documentation:
+// [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/)
+func (c cmdable) FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd {
+ args := []interface{}{"FT.SYNUPDATE", index, synGroupId}
+	if options != nil && options.SkipInitialScan {
+		args = append(args, "SKIPINITIALSCAN")
+	}
+ args = append(args, terms...)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
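+
+// A short synonym-group sketch: terms updated under the same group id are
+// treated as synonyms of one another at query time:
+//
+//	_ = rdb.FTSynUpdate(ctx, "idx:doc", "grp1", []interface{}{"car", "automobile"})
+//	groups, err := rdb.FTSynDump(ctx, "idx:doc").Result()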
+
+// FTTagVals - Returns all distinct values indexed in a tag field.
+// The 'index' parameter specifies the index to check, and the 'field' parameter specifies the tag field to retrieve values from.
+// For more information, please refer to the Redis documentation:
+// [FT.TAGVALS]: (https://redis.io/commands/ft.tagvals/)
+func (c cmdable) FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "FT.TAGVALS", index, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// type FTProfileResult struct {
+// Results []interface{}
+// Profile ProfileDetails
+// }
+
+// type ProfileDetails struct {
+// TotalProfileTime string
+// ParsingTime string
+// PipelineCreationTime string
+// Warning string
+// IteratorsProfile []IteratorProfile
+// ResultProcessorsProfile []ResultProcessorProfile
+// }
+
+// type IteratorProfile struct {
+// Type string
+// QueryType string
+// Time interface{}
+// Counter int
+// Term string
+// Size int
+// ChildIterators []IteratorProfile
+// }
+
+// type ResultProcessorProfile struct {
+// Type string
+// Time interface{}
+// Counter int
+// }
+
+// func parseFTProfileResult(data []interface{}) (FTProfileResult, error) {
+// var result FTProfileResult
+// if len(data) < 2 {
+// return result, fmt.Errorf("unexpected data length")
+// }
+
+// // Parse results
+// result.Results = data[0].([]interface{})
+
+// // Parse profile details
+// profileData := data[1].([]interface{})
+// profileDetails := ProfileDetails{}
+// for i := 0; i < len(profileData); i += 2 {
+// switch profileData[i].(string) {
+// case "Total profile time":
+// profileDetails.TotalProfileTime = profileData[i+1].(string)
+// case "Parsing time":
+// profileDetails.ParsingTime = profileData[i+1].(string)
+// case "Pipeline creation time":
+// profileDetails.PipelineCreationTime = profileData[i+1].(string)
+// case "Warning":
+// profileDetails.Warning = profileData[i+1].(string)
+// case "Iterators profile":
+// profileDetails.IteratorsProfile = parseIteratorsProfile(profileData[i+1].([]interface{}))
+// case "Result processors profile":
+// profileDetails.ResultProcessorsProfile = parseResultProcessorsProfile(profileData[i+1].([]interface{}))
+// }
+// }
+
+// result.Profile = profileDetails
+// return result, nil
+// }
+
+// func parseIteratorsProfile(data []interface{}) []IteratorProfile {
+// var iterators []IteratorProfile
+// for _, item := range data {
+// profile := item.([]interface{})
+// iterator := IteratorProfile{}
+// for i := 0; i < len(profile); i += 2 {
+// switch profile[i].(string) {
+// case "Type":
+// iterator.Type = profile[i+1].(string)
+// case "Query type":
+// iterator.QueryType = profile[i+1].(string)
+// case "Time":
+// iterator.Time = profile[i+1]
+// case "Counter":
+// iterator.Counter = int(profile[i+1].(int64))
+// case "Term":
+// iterator.Term = profile[i+1].(string)
+// case "Size":
+// iterator.Size = int(profile[i+1].(int64))
+// case "Child iterators":
+// iterator.ChildIterators = parseChildIteratorsProfile(profile[i+1].([]interface{}))
+// }
+// }
+// iterators = append(iterators, iterator)
+// }
+// return iterators
+// }
+
+// func parseChildIteratorsProfile(data []interface{}) []IteratorProfile {
+// var iterators []IteratorProfile
+// for _, item := range data {
+// profile := item.([]interface{})
+// iterator := IteratorProfile{}
+// for i := 0; i < len(profile); i += 2 {
+// switch profile[i].(string) {
+// case "Type":
+// iterator.Type = profile[i+1].(string)
+// case "Query type":
+// iterator.QueryType = profile[i+1].(string)
+// case "Time":
+// iterator.Time = profile[i+1]
+// case "Counter":
+// iterator.Counter = int(profile[i+1].(int64))
+// case "Term":
+// iterator.Term = profile[i+1].(string)
+// case "Size":
+// iterator.Size = int(profile[i+1].(int64))
+// }
+// }
+// iterators = append(iterators, iterator)
+// }
+// return iterators
+// }
+
+// func parseResultProcessorsProfile(data []interface{}) []ResultProcessorProfile {
+// var processors []ResultProcessorProfile
+// for _, item := range data {
+// profile := item.([]interface{})
+// processor := ResultProcessorProfile{}
+// for i := 0; i < len(profile); i += 2 {
+// switch profile[i].(string) {
+// case "Type":
+// processor.Type = profile[i+1].(string)
+// case "Time":
+// processor.Time = profile[i+1]
+// case "Counter":
+// processor.Counter = int(profile[i+1].(int64))
+// }
+// }
+// processors = append(processors, processor)
+// }
+// return processors
+// }
+
+// func NewFTProfileCmd(ctx context.Context, args ...interface{}) *FTProfileCmd {
+// return &FTProfileCmd{
+// baseCmd: baseCmd{
+// ctx: ctx,
+// args: args,
+// },
+// }
+// }
+
+// type FTProfileCmd struct {
+// baseCmd
+// val FTProfileResult
+// }
+
+// func (cmd *FTProfileCmd) String() string {
+// return cmdString(cmd, cmd.val)
+// }
+
+// func (cmd *FTProfileCmd) SetVal(val FTProfileResult) {
+// cmd.val = val
+// }
+
+// func (cmd *FTProfileCmd) Result() (FTProfileResult, error) {
+// return cmd.val, cmd.err
+// }
+
+// func (cmd *FTProfileCmd) Val() FTProfileResult {
+// return cmd.val
+// }
+
+// func (cmd *FTProfileCmd) readReply(rd *proto.Reader) (err error) {
+// data, err := rd.ReadSlice()
+// if err != nil {
+// return err
+// }
+// cmd.val, err = parseFTProfileResult(data)
+// if err != nil {
+// cmd.err = err
+// }
+// return nil
+// }
+
+// // FTProfile - Executes a search query and returns a profile of how the query was processed.
+// // The 'index' parameter specifies the index to search, the 'limited' parameter specifies whether to limit the results,
+// // and the 'query' parameter specifies the search / aggregate query. Note that you must pass either a SearchQuery or an AggregateQuery.
+// // For more information, please refer to the Redis documentation:
+// // [FT.PROFILE]: (https://redis.io/commands/ft.profile/)
+// func (c cmdable) FTProfile(ctx context.Context, index string, limited bool, query interface{}) *FTProfileCmd {
+// queryType := ""
+// var argsQuery []interface{}
+
+// switch v := query.(type) {
+// case AggregateQuery:
+// queryType = "AGGREGATE"
+// argsQuery = v
+// case SearchQuery:
+// queryType = "SEARCH"
+// argsQuery = v
+// default:
+// panic("FT.PROFILE: query must be either AggregateQuery or SearchQuery")
+// }
+
+// args := []interface{}{"FT.PROFILE", index, queryType}
+
+// if limited {
+// args = append(args, "LIMITED")
+// }
+// args = append(args, "QUERY")
+// args = append(args, argsQuery...)
+
+// cmd := NewFTProfileCmd(ctx, args...)
+// _ = c(ctx, cmd)
+// return cmd
+// }
diff --git a/vendor/github.com/redis/go-redis/v9/sentinel.go b/vendor/github.com/redis/go-redis/v9/sentinel.go
new file mode 100644
index 0000000..3156955
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/sentinel.go
@@ -0,0 +1,841 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/rand"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+ // The master name.
+ MasterName string
+ // A seed list of host:port addresses of sentinel nodes.
+ SentinelAddrs []string
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+	// If specified with SentinelPassword, enables ACL-based authentication (via
+	// AUTH <user> <pass>).
+ SentinelUsername string
+	// Sentinel password from "requirepass <password>" (if enabled) in Sentinel
+ // configuration, or, if SentinelUsername is also supplied, used for ACL-based
+ // authentication.
+ SentinelPassword string
+
+ // Allows routing read-only commands to the closest master or replica node.
+ // This option only works with NewFailoverClusterClient.
+ RouteByLatency bool
+	// Allows routing read-only commands to a random master or replica node.
+ // This option only works with NewFailoverClusterClient.
+ RouteRandomly bool
+
+ // Route all commands to replica read-only nodes.
+ ReplicaOnly bool
+
+	// Use replicas disconnected from the master when no connected replicas are
+	// available. Currently, this option only works in the RandomReplicaAddr function.
+ UseDisconnectedReplicas bool
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Protocol int
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
+
+ PoolFIFO bool
+
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+
+ TLSConfig *tls.Config
+
+ DisableIndentity bool
+ IdentitySuffix string
+ UnstableResp3 bool
+}
+
+func (opt *FailoverOptions) clientOptions() *Options {
+ return &Options{
+ Addr: "FailoverClient",
+ ClientName: opt.ClientName,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: opt.DB,
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+
+ TLSConfig: opt.TLSConfig,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ UnstableResp3: opt.UnstableResp3,
+ }
+}
+
+func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
+ return &Options{
+ Addr: addr,
+ ClientName: opt.ClientName,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: 0,
+ Username: opt.SentinelUsername,
+ Password: opt.SentinelPassword,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+
+ TLSConfig: opt.TLSConfig,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ UnstableResp3: opt.UnstableResp3,
+ }
+}
+
+func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
+ return &ClusterOptions{
+ ClientName: opt.ClientName,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRedirects: opt.MaxRetries,
+
+ RouteByLatency: opt.RouteByLatency,
+ RouteRandomly: opt.RouteRandomly,
+
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+
+ TLSConfig: opt.TLSConfig,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ }
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+ if failoverOpt.RouteByLatency {
+ panic("to route commands by latency, use NewFailoverClusterClient")
+ }
+ if failoverOpt.RouteRandomly {
+ panic("to route commands randomly, use NewFailoverClusterClient")
+ }
+
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ rand.Shuffle(len(sentinelAddrs), func(i, j int) {
+ sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
+ })
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clientOptions()
+ opt.Dialer = masterReplicaDialer(failover)
+ opt.init()
+
+ var connPool *pool.ConnPool
+
+ rdb := &Client{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+ rdb.init()
+
+ connPool = newConnPool(opt, rdb.dialHook)
+ rdb.connPool = connPool
+ rdb.onClose = failover.Close
+
+ failover.mu.Lock()
+ failover.onFailover = func(ctx context.Context, addr string) {
+ _ = connPool.Filter(func(cn *pool.Conn) bool {
+ return cn.RemoteAddr().String() != addr
+ })
+ }
+ failover.mu.Unlock()
+
+ return rdb
+}
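+
+// A minimal failover-client sketch; the master name and sentinel addresses
+// are illustrative:
+//
+//	rdb := NewFailoverClient(&FailoverOptions{
+//		MasterName:    "mymaster",
+//		SentinelAddrs: []string{"localhost:26379", "localhost:26380"},
+//	})
+//	defer rdb.Close()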
+
+func masterReplicaDialer(
+ failover *sentinelFailover,
+) func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return func(ctx context.Context, network, _ string) (net.Conn, error) {
+ var addr string
+ var err error
+
+ if failover.opt.ReplicaOnly {
+ addr, err = failover.RandomReplicaAddr(ctx)
+ } else {
+ addr, err = failover.MasterAddr(ctx)
+ if err == nil {
+ failover.trySwitchMaster(ctx, addr)
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ if failover.opt.Dialer != nil {
+ return failover.opt.Dialer(ctx, network, addr)
+ }
+
+ netDialer := &net.Dialer{
+ Timeout: failover.opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if failover.opt.TLSConfig == nil {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
+ }
+}
+
+//------------------------------------------------------------------------------
+
+// SentinelClient is a client for a Redis Sentinel.
+type SentinelClient struct {
+ *baseClient
+ hooksMixin
+}
+
+func NewSentinelClient(opt *Options) *SentinelClient {
+ opt.init()
+ c := &SentinelClient{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ })
+ c.connPool = newConnPool(opt, c.dialHook)
+
+ return c
+}
+
+func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+func (c *SentinelClient) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Ping is used to test if a connection is still alive, or to
+// measure latency.
+func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "ping")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *SentinelClient) Sentinels(ctx context.Context, name string) *MapStringStringSliceCmd {
+ cmd := NewMapStringStringSliceCmd(ctx, "sentinel", "sentinels", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Failover forces a failover as if the master were not reachable, without
+// asking other Sentinels for agreement.
+func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Reset resets all the masters with a matching name. The pattern argument is a
+// glob-style pattern. The reset process clears any previous state in a master
+// (including a failover in progress), and removes every replica and sentinel
+// already discovered and associated with the master.
+func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
+ cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// FlushConfig forces Sentinel to rewrite its configuration on disk, including
+// the current Sentinel state.
+func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Master shows the state and info of the specified master.
+func (c *SentinelClient) Master(ctx context.Context, name string) *MapStringStringCmd {
+ cmd := NewMapStringStringCmd(ctx, "sentinel", "master", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Masters shows a list of monitored masters and their state.
+func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "masters")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Replicas shows a list of replicas for the specified master and their state.
+func (c *SentinelClient) Replicas(ctx context.Context, name string) *MapStringStringSliceCmd {
+ cmd := NewMapStringStringSliceCmd(ctx, "sentinel", "replicas", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// CkQuorum checks if the current Sentinel configuration is able to reach the
+// quorum needed to failover a master, and the majority needed to authorize the
+// failover. This command should be used in monitoring systems to check if a
+// Sentinel deployment is ok.
+func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Monitor tells the Sentinel to start monitoring a new master with the specified
+// name, ip, port, and quorum.
+func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Set changes configuration parameters of a specific master.
+func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Remove removes the specified master: the master will no longer be
+// monitored, and will be entirely removed from the internal state of the
+// Sentinel.
+func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "remove", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+type sentinelFailover struct {
+ opt *FailoverOptions
+
+ sentinelAddrs []string
+
+ onFailover func(ctx context.Context, addr string)
+ onUpdate func(ctx context.Context)
+
+ mu sync.RWMutex
+ _masterAddr string
+ sentinel *SentinelClient
+ pubsub *PubSub
+}
+
+func (c *sentinelFailover) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.sentinel != nil {
+ return c.closeSentinel()
+ }
+ return nil
+}
+
+func (c *sentinelFailover) closeSentinel() error {
+ firstErr := c.pubsub.Close()
+ c.pubsub = nil
+
+ err := c.sentinel.Close()
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ c.sentinel = nil
+
+ return firstErr
+}
+
+func (c *sentinelFailover) RandomReplicaAddr(ctx context.Context) (string, error) {
+ if c.opt == nil {
+ return "", errors.New("opt is nil")
+ }
+
+ addresses, err := c.replicaAddrs(ctx, false)
+ if err != nil {
+ return "", err
+ }
+
+ if len(addresses) == 0 && c.opt.UseDisconnectedReplicas {
+ addresses, err = c.replicaAddrs(ctx, true)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ if len(addresses) == 0 {
+ return c.MasterAddr(ctx)
+ }
+ return addresses[rand.Intn(len(addresses))], nil
+}
+
+func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addr, err := c.getMasterAddr(ctx, sentinel)
+ if err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else {
+ return addr, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addr, err := c.getMasterAddr(ctx, c.sentinel)
+ if err != nil {
+ _ = c.closeSentinel()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else {
+ return addr, nil
+ }
+ }
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ _ = sentinel.Close()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
+ c.opt.MasterName, err)
+ continue
+ }
+
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+ return addr, nil
+ }
+
+ return "", errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addrs, err := c.getReplicaAddrs(ctx, sentinel)
+ if err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else if len(addrs) > 0 {
+ return addrs, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addrs, err := c.getReplicaAddrs(ctx, c.sentinel)
+ if err != nil {
+ _ = c.closeSentinel()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else if len(addrs) > 0 {
+ return addrs, nil
+ } else {
+ // No error and no replicas.
+ _ = c.closeSentinel()
+ }
+ }
+
+ var sentinelReachable bool
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ replicas, err := sentinel.Replicas(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ _ = sentinel.Close()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ internal.Logger.Printf(ctx, "sentinel: Replicas master=%q failed: %s",
+ c.opt.MasterName, err)
+ continue
+ }
+ sentinelReachable = true
+ addrs := parseReplicaAddrs(replicas, useDisconnected)
+ if len(addrs) == 0 {
+ continue
+ }
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ return addrs, nil
+ }
+
+ if sentinelReachable {
+ return []string{}, nil
+ }
+ return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) (string, error) {
+ addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ return "", err
+ }
+ return net.JoinHostPort(addr[0], addr[1]), nil
+}
+
+func (c *sentinelFailover) getReplicaAddrs(ctx context.Context, sentinel *SentinelClient) ([]string, error) {
+ addrs, err := sentinel.Replicas(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ return nil, err
+ }
+ return parseReplicaAddrs(addrs, false), nil
+}
+
+func parseReplicaAddrs(addrs []map[string]string, keepDisconnected bool) []string {
+ nodes := make([]string, 0, len(addrs))
+ for _, node := range addrs {
+ isDown := false
+ if flags, ok := node["flags"]; ok {
+ for _, flag := range strings.Split(flags, ",") {
+ switch flag {
+ case "s_down", "o_down":
+ isDown = true
+ case "disconnected":
+ if !keepDisconnected {
+ isDown = true
+ }
+ }
+ }
+ }
+ if !isDown && node["ip"] != "" && node["port"] != "" {
+ nodes = append(nodes, net.JoinHostPort(node["ip"], node["port"]))
+ }
+ }
+
+ return nodes
+}
+
+func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
+ c.mu.RLock()
+ currentAddr := c._masterAddr //nolint:ifshort
+ c.mu.RUnlock()
+
+ if addr == currentAddr {
+ return
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
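+	// Re-check under the write lock: another goroutine may have already
+	// switched the master while we were upgrading from the read lock.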
+ if addr == c._masterAddr {
+ return
+ }
+ c._masterAddr = addr
+
+ internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
+ c.opt.MasterName, addr)
+ if c.onFailover != nil {
+ c.onFailover(ctx, addr)
+ }
+}
+
+func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
+ if c.sentinel != nil {
+ panic("not reached")
+ }
+ c.sentinel = sentinel
+ c.discoverSentinels(ctx)
+
+ c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+replica-reconf-done")
+ go c.listen(c.pubsub)
+}
+
+func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
+ sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
+ return
+ }
+ for _, sentinel := range sentinels {
+ ip, ok := sentinel["ip"]
+ if !ok {
+ continue
+ }
+ port, ok := sentinel["port"]
+ if !ok {
+ continue
+ }
+ if ip != "" && port != "" {
+ sentinelAddr := net.JoinHostPort(ip, port)
+ if !contains(c.sentinelAddrs, sentinelAddr) {
+ internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
+ sentinelAddr, c.opt.MasterName)
+ c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
+ }
+ }
+ }
+}
+
+func (c *sentinelFailover) listen(pubsub *PubSub) {
+ ctx := context.TODO()
+
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+
+ ch := pubsub.Channel()
+ for msg := range ch {
+ if msg.Channel == "+switch-master" {
+ parts := strings.Split(msg.Payload, " ")
+ if parts[0] != c.opt.MasterName {
+ internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
+ continue
+ }
+ addr := net.JoinHostPort(parts[3], parts[4])
+ c.trySwitchMaster(pubsub.getContext(), addr)
+ }
+
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+ }
+}
+
+func contains(slice []string, str string) bool {
+ for _, s := range slice {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
+
+//------------------------------------------------------------------------------
+
+// NewFailoverClusterClient returns a client that supports routing read-only commands
+// to a replica node.
+func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clusterOptions()
+ opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
+ masterAddr, err := failover.MasterAddr(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := []ClusterNode{{
+ Addr: masterAddr,
+ }}
+
+ replicaAddrs, err := failover.replicaAddrs(ctx, false)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, replicaAddr := range replicaAddrs {
+ nodes = append(nodes, ClusterNode{
+ Addr: replicaAddr,
+ })
+ }
+
+ slots := []ClusterSlot{
+ {
+ Start: 0,
+ End: 16383,
+ Nodes: nodes,
+ },
+ }
+ return slots, nil
+ }
+
+ c := NewClusterClient(opt)
+
+ failover.mu.Lock()
+ failover.onUpdate = func(ctx context.Context) {
+ c.ReloadState(ctx)
+ }
+ failover.mu.Unlock()
+
+ return c
+}
diff --git a/vendor/github.com/redis/go-redis/v9/set_commands.go b/vendor/github.com/redis/go-redis/v9/set_commands.go
new file mode 100644
index 0000000..cef8ad6
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/set_commands.go
@@ -0,0 +1,217 @@
+package redis
+
+import "context"
+
+type SetCmdable interface {
+ SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SCard(ctx context.Context, key string) *IntCmd
+ SDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SInter(ctx context.Context, keys ...string) *StringSliceCmd
+ SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd
+ SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+ SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
+ SMembers(ctx context.Context, key string) *StringSliceCmd
+ SMembersMap(ctx context.Context, key string) *StringStructMapCmd
+ SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
+ SPop(ctx context.Context, key string) *StringCmd
+ SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRandMember(ctx context.Context, key string) *StringCmd
+ SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ SUnion(ctx context.Context, keys ...string) *StringSliceCmd
+ SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "sadd"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "scard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sdiff"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sdiffstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sinter"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd {
+ args := make([]interface{}, 4+len(keys))
+ args[0] = "sintercard"
+ numkeys := int64(0)
+ for i, key := range keys {
+ args[2+i] = key
+ numkeys++
+ }
+ args[1] = numkeys
+ args[2+numkeys] = "limit"
+ args[3+numkeys] = limit
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
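+
+// Example (illustrative sketch; assumes a client rdb and a context ctx):
+//
+//	// SINTERCARD 2 set1 set2 LIMIT 10
+//	n, err := rdb.SInterCard(ctx, 10, "set1", "set2").Result()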
+
+func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sinterstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "sismember", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
+func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "smismember"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembers Redis `SMEMBERS key` command output as a slice.
+func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembersMap Redis `SMEMBERS key` command output as a map.
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
+ cmd := NewStringStructMapCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "smove", source, destination, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SPop Redis `SPOP key` command.
+func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "spop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SPopN Redis `SPOP key count` command.
+func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "spop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SRandMember Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "srandmember", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SRandMemberN Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "srem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sunion"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sunionstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"sscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
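+
+// Example cursor loop for SScan (illustrative sketch; assumes a client rdb
+// and a context ctx):
+//
+//	var cursor uint64
+//	for {
+//		var keys []string
+//		var err error
+//		keys, cursor, err = rdb.SScan(ctx, "myset", cursor, "user:*", 100).Result()
+//		if err != nil {
+//			break
+//		}
+//		_ = keys // process this batch of members
+//		if cursor == 0 {
++//			break // a zero cursor means the scan is complete
+//		}
+//	}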
diff --git a/vendor/github.com/redis/go-redis/v9/sortedset_commands.go b/vendor/github.com/redis/go-redis/v9/sortedset_commands.go
new file mode 100644
index 0000000..6701402
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/sortedset_commands.go
@@ -0,0 +1,772 @@
+package redis
+
+import (
+ "context"
+ "strings"
+ "time"
+)
+
+type SortedSetCmdable interface {
+ BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd
+ ZAdd(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
+ ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
+ ZCard(ctx context.Context, key string) *IntCmd
+ ZCount(ctx context.Context, key, min, max string) *IntCmd
+ ZLexCount(ctx context.Context, key, min, max string) *IntCmd
+ ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+ ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
+ ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
+ ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd
+ ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+ ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd
+ ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
+ ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
+ ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
+ ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
+ ZRank(ctx context.Context, key, member string) *IntCmd
+ ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd
+ ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
+ ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
+ ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
+ ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRevRank(ctx context.Context, key, member string) *IntCmd
+ ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd
+ ZScore(ctx context.Context, key, member string) *FloatCmd
+ ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+ ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd
+ ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd
+ ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
+ ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
+ ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
+ ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+}
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmax"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmin"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BZMPop is the blocking variant of ZMPOP.
+// When any of the sorted sets contains elements, this command behaves exactly like ZMPOP.
+// When all sorted sets are empty, Redis will block the connection until another client adds members to one of the keys or until the timeout elapses.
+// A timeout of zero can be used to block indefinitely.
+// example: client.BZMPop(ctx, 0, "max", 1, "set")
+func (c cmdable) BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd {
+ args := make([]interface{}, 3+len(keys), 6+len(keys))
+ args[0] = "bzmpop"
+ args[1] = formatSec(ctx, timeout)
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ args = append(args, strings.ToLower(order), "count", count)
+ cmd := NewZSliceWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
+type ZAddArgs struct {
+ NX bool
+ XX bool
+ LT bool
+ GT bool
+ Ch bool
+ Members []Z
+}
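+
+// Example (illustrative sketch; assumes a client rdb and a context ctx):
+//
+//	// ZADD leaderboard GT CH 42 alice
+//	changed, err := rdb.ZAddArgs(ctx, "leaderboard", ZAddArgs{
+//		GT:      true,
+//		Ch:      true,
+//		Members: []Z{{Score: 42, Member: "alice"}},
+//	}).Result()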
+
+func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
+ a := make([]interface{}, 0, 6+2*len(args.Members))
+ a = append(a, "zadd", key)
+
+ // The GT, LT and NX options are mutually exclusive.
+ if args.NX {
+ a = append(a, "nx")
+ } else {
+ if args.XX {
+ a = append(a, "xx")
+ }
+ if args.GT {
+ a = append(a, "gt")
+ } else if args.LT {
+ a = append(a, "lt")
+ }
+ }
+ if args.Ch {
+ a = append(a, "ch")
+ }
+ if incr {
+ a = append(a, "incr")
+ }
+ for _, m := range args.Members {
+ a = append(a, m.Score)
+ a = append(a, m.Member)
+ }
+ return a
+}
+
+func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
+ cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
+ cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAdd Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ Members: members,
+ })
+}
+
+// ZAddLT Redis `ZADD key LT score member [score member ...]` command.
+func (c cmdable) ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ LT: true,
+ Members: members,
+ })
+}
+
+// ZAddGT Redis `ZADD key GT score member [score member ...]` command.
+func (c cmdable) ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ GT: true,
+ Members: members,
+ })
+}
+
+// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ NX: true,
+ Members: members,
+ })
+}
+
+// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ XX: true,
+ Members: members,
+ })
+}
+
+func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinterstore", destination, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd {
+ args := make([]interface{}, 4+len(keys))
+ args[0] = "zintercard"
+ numkeys := int64(0)
+ for i, key := range keys {
+ args[2+i] = key
+ numkeys++
+ }
+ args[1] = numkeys
+ args[2+numkeys] = "limit"
+ args[3+numkeys] = limit
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZMPop pops one or more elements with the highest or lowest score from the
+// first non-empty sorted set among the provided key names.
+// order: "max" (highest score) or "min" (lowest score); count must be > 0.
+// example: client.ZMPop(ctx, "max", 5, "set1", "set2")
+func (c cmdable) ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd {
+ args := make([]interface{}, 2+len(keys), 5+len(keys))
+ args[0] = "zmpop"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ args = append(args, strings.ToLower(order), "count", count)
+ cmd := NewZSliceWithKeyCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "zmscore"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmax",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmin",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRangeArgs is all the options of the ZRange command.
+// In Redis >= 6.2.0, ZRANGE can replace the following commands:
+//
+//	ZREVRANGE,
+//	ZRANGEBYSCORE,
+//	ZREVRANGEBYSCORE,
+//	ZRANGEBYLEX,
+//	ZREVRANGEBYLEX.
+//
+// Please pay attention to your redis-server version: the Rev, ByScore, ByLex
+// and Offset+Count options require redis-server 6.2.0 or higher.
+type ZRangeArgs struct {
+ Key string
+
+	// When the ByScore option is provided, open (exclusive) intervals can be set.
+	// By default, the score intervals specified by Start and Stop are closed (inclusive).
+	// It is similar to the ZRangeByScore command (deprecated since 6.2.0).
+ // For example:
+ // ZRangeArgs{
+ // Key: "example-key",
+ // Start: "(3",
+ // Stop: 8,
+ // ByScore: true,
+ // }
+ // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
+ //
+	// For the ByLex option, it is similar to the ZRangeByLex command (deprecated since 6.2.0).
+	// You can set the Start and Stop options as follows:
+ // ZRangeArgs{
+ // Key: "example-key",
+ // Start: "[abc",
+ // Stop: "(def",
+ // ByLex: true,
+ // }
+ // cmd: "ZRange example-key [abc (def ByLex"
+ //
+	// For normal cases (ByScore==false && ByLex==false), Start and Stop should be set to the index range (int).
+ // You can read the documentation for more information: https://redis.io/commands/zrange
+ Start interface{}
+ Stop interface{}
+
+ // The ByScore and ByLex options are mutually exclusive.
+ ByScore bool
+ ByLex bool
+
+ Rev bool
+
+ // limit offset count.
+ Offset int64
+ Count int64
+}
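+
+// Example (illustrative sketch; assumes a client rdb and a context ctx):
+//
+//	// ZRANGE example-key (3 8 BYSCORE
+//	vals, err := rdb.ZRangeArgs(ctx, ZRangeArgs{
+//		Key:     "example-key",
+//		Start:   "(3",
+//		Stop:    8,
+//		ByScore: true,
+//	}).Result()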
+
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+	// For Rev+ByScore/ByLex, we need to adjust the position of Start and Stop.
+ if z.Rev && (z.ByScore || z.ByLex) {
+ args = append(args, z.Key, z.Stop, z.Start)
+ } else {
+ args = append(args, z.Key, z.Start, z.Stop)
+ }
+
+ if z.ByScore {
+ args = append(args, "byscore")
+ } else if z.ByLex {
+ args = append(args, "bylex")
+ }
+ if z.Rev {
+ args = append(args, "rev")
+ }
+ if z.Offset != 0 || z.Count != 0 {
+ args = append(args, "limit", z.Offset, z.Count)
+ }
+ return args
+}
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ return c.ZRangeArgs(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
+}
+
+func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
+}
+
+type ZRangeBy struct {
+ Min, Max string
+ Offset, Count int64
+}
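+
+// Example (illustrative sketch; assumes a client rdb and a context ctx):
+//
+//	// ZRANGEBYSCORE key -inf +inf LIMIT 0 10
+//	vals, err := rdb.ZRangeByScore(ctx, "key", &ZRangeBy{
+//		Min: "-inf", Max: "+inf", Offset: 0, Count: 10,
+//	}).Result()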
+
+func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "withscores")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrangestore", dst)
+ args = z.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRankWithScore according to the Redis documentation, if member does not exist
+// in the sorted set or key does not exist, it will return a redis.Nil error.
+func (c cmdable) ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd {
+ cmd := NewRankWithScoreCmd(ctx, "zrank", key, member, "withscore")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "zrem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "zremrangebyrank",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRevRangeWithScores Redis `ZREVRANGE key start stop WITHSCORES` command.
+func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Max, opt.Min}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrevrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd {
+ cmd := NewRankWithScoreCmd(ctx, "zrevrank", key, member, "withscore")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zscore", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunionstore", dest, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRandMember redis-server version >= 6.2.0.
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRandMemberWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrandmember", key, count, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiff redis-server version >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+ args[len(keys)+2] = "withscores"
+
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffStore redis-server version >=6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 0, 3+len(keys))
+ args = append(args, "zdiffstore", destination, len(keys))
+ for _, key := range keys {
+ args = append(args, key)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"zscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Z represents sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZWithKey represents sorted set member including the name of the key where it was popped.
+type ZWithKey struct {
+ Z
+ Key string
+}
+
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
+type ZStore struct {
+ Keys []string
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
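+
+// Example (illustrative sketch; assumes a client rdb and a context ctx):
+//
+//	// ZUNIONSTORE dest 2 z1 z2 WEIGHTS 2 3 AGGREGATE MAX
+//	n, err := rdb.ZUnionStore(ctx, "dest", &ZStore{
+//		Keys:      []string{"z1", "z2"},
+//		Weights:   []float64{2, 3},
+//		Aggregate: "MAX",
+//	}).Result()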
+
+func (z ZStore) len() (n int) {
+ n = len(z.Keys)
+ if len(z.Weights) > 0 {
+ n += 1 + len(z.Weights)
+ }
+ if z.Aggregate != "" {
+ n += 2
+ }
+ return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+ for _, key := range z.Keys {
+ args = append(args, key)
+ }
+ if len(z.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weights := range z.Weights {
+ args = append(args, weights)
+ }
+ }
+ if z.Aggregate != "" {
+ args = append(args, "aggregate", z.Aggregate)
+ }
+ return args
+}
diff --git a/vendor/github.com/redis/go-redis/v9/stream_commands.go b/vendor/github.com/redis/go-redis/v9/stream_commands.go
new file mode 100644
index 0000000..6d7b229
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/stream_commands.go
@@ -0,0 +1,450 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type StreamCmdable interface {
+ XAdd(ctx context.Context, a *XAddArgs) *StringCmd
+ XDel(ctx context.Context, stream string, ids ...string) *IntCmd
+ XLen(ctx context.Context, stream string) *IntCmd
+ XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
+ XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
+ XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
+ XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
+ XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
+ XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
+ XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+ XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
+ XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
+ XPending(ctx context.Context, stream, group string) *XPendingCmd
+ XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
+ XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
+ XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+ XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
+ XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
+ XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
+ XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
+ XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
+ XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
+ XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+ XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
+ XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
+}
+
+// XAddArgs accepts values in the following formats:
+// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that a map will not preserve the order of key-value pairs.
+// MaxLen and MinID are mutually exclusive; only one of them can be used.
+type XAddArgs struct {
+ Stream string
+ NoMkStream bool
+ MaxLen int64 // MAXLEN N
+ MinID string
+ // Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
+ Approx bool
+ Limit int64
+ ID string
+ Values interface{}
+}
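+
+// Example (illustrative sketch; assumes a client rdb and a context ctx):
+//
+//	// XADD events MAXLEN ~ 1000 * temp 42
+//	id, err := rdb.XAdd(ctx, &XAddArgs{
+//		Stream: "events",
+//		MaxLen: 1000,
+//		Approx: true,
+//		Values: map[string]interface{}{"temp": 42},
+//	}).Result()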
+
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+ args := make([]interface{}, 0, 11)
+ args = append(args, "xadd", a.Stream)
+ if a.NoMkStream {
+ args = append(args, "nomkstream")
+ }
+ switch {
+ case a.MaxLen > 0:
+ if a.Approx {
+ args = append(args, "maxlen", "~", a.MaxLen)
+ } else {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ case a.MinID != "":
+ if a.Approx {
+ args = append(args, "minid", "~", a.MinID)
+ } else {
+ args = append(args, "minid", a.MinID)
+ }
+ }
+ if a.Limit > 0 {
+ args = append(args, "limit", a.Limit)
+ }
+ if a.ID != "" {
+ args = append(args, a.ID)
+ } else {
+ args = append(args, "*")
+ }
+ args = appendArg(args, a.Values)
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+ args := []interface{}{"xdel", stream}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xlen", stream)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadArgs struct {
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+ ID string
+}
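+
+// Example (illustrative sketch; assumes a client rdb and a context ctx):
+//
+//	// XREAD COUNT 10 BLOCK 5000 STREAMS events $
+//	streams, err := rdb.XRead(ctx, &XReadArgs{
+//		Streams: []string{"events", "$"},
+//		Count:   10,
+//		Block:   5 * time.Second,
+//	}).Result()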
+
+func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 2*len(a.Streams)+6)
+ args = append(args, "xread")
+
+ keyPos := int8(1)
+ if a.Count > 0 {
+ args = append(args, "count")
+ args = append(args, a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block")
+ args = append(args, int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+ if a.ID != "" {
+ for range a.Streams {
+ args = append(args, a.ID)
+ }
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.SetFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
+ return c.XRead(ctx, &XReadArgs{
+ Streams: streams,
+ Block: -1,
+ })
+}
+
+func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadGroupArgs struct {
+ Group string
+ Consumer string
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+ NoAck bool
+}
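+
+// Example (illustrative sketch; assumes a client rdb, a context ctx, and an
+// existing consumer group):
+//
+//	// XREADGROUP GROUP mygroup consumer1 COUNT 10 BLOCK 2000 STREAMS events >
+//	streams, err := rdb.XReadGroup(ctx, &XReadGroupArgs{
+//		Group:    "mygroup",
+//		Consumer: "consumer1",
+//		Streams:  []string{"events", ">"},
+//		Count:    10,
+//		Block:    2 * time.Second,
+//	}).Result()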
+
+func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 10+len(a.Streams))
+ args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+
+ keyPos := int8(4)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block", int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ if a.NoAck {
+ args = append(args, "noack")
+ keyPos++
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.SetFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
+ args := []interface{}{"xack", stream, group}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+ cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XPendingExtArgs struct {
+ Stream string
+ Group string
+ Idle time.Duration
+ Start string
+ End string
+ Count int64
+ Consumer string
+}
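+
+// Example (illustrative sketch; assumes a client rdb and a context ctx):
+//
+//	// XPENDING events mygroup IDLE 60000 - + 10 consumer1
+//	pending, err := rdb.XPendingExt(ctx, &XPendingExtArgs{
+//		Stream:   "events",
+//		Group:    "mygroup",
+//		Idle:     time.Minute,
+//		Start:    "-",
+//		End:      "+",
+//		Count:    10,
+//		Consumer: "consumer1",
+//	}).Result()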
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "xpending", a.Stream, a.Group)
+ if a.Idle != 0 {
+ args = append(args, "idle", formatMs(ctx, a.Idle))
+ }
+ args = append(args, a.Start, a.End, a.Count)
+ if a.Consumer != "" {
+ args = append(args, a.Consumer)
+ }
+ cmd := NewXPendingExtCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XAutoClaimArgs struct {
+ Stream string
+ Group string
+ MinIdle time.Duration
+ Start string
+ Count int64
+ Consumer string
+}
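+
+// Example (illustrative sketch; assumes a client rdb and a context ctx):
+//
+//	// XAUTOCLAIM events mygroup consumer2 1800000 0-0 COUNT 25
+//	msgs, start, err := rdb.XAutoClaim(ctx, &XAutoClaimArgs{
+//		Stream:   "events",
+//		Group:    "mygroup",
+//		Consumer: "consumer2",
+//		MinIdle:  30 * time.Minute,
+//		Start:    "0-0",
+//		Count:    25,
+//	}).Result()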
+
+func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
+ args := xAutoClaimArgs(ctx, a)
+ cmd := NewXAutoClaimCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
+ args := xAutoClaimArgs(ctx, a)
+ args = append(args, "justid")
+ cmd := NewXAutoClaimJustIDCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 8)
+ args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ }
+ return args
+}
+
+type XClaimArgs struct {
+ Stream string
+ Group string
+ Consumer string
+ MinIdle time.Duration
+ Messages []string
+}
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+ args := xClaimArgs(a)
+ cmd := NewXMessageSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+ args := xClaimArgs(a)
+ args = append(args, "justid")
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 5+len(a.Messages))
+ args = append(args,
+ "xclaim",
+ a.Stream,
+ a.Group, a.Consumer,
+ int64(a.MinIdle/time.Millisecond))
+ for _, id := range a.Messages {
+ args = append(args, id)
+ }
+ return args
+}
+
+// xTrim If approx is true, adds the "~" modifier; otherwise the default exact
+// trimming ("=") is used.
+// example:
+//
+//	XTRIM key MAXLEN/MINID threshold LIMIT limit.
+//	XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+//
+// If the redis-server version is lower than 6.2, set limit to 0.
+func (c cmdable) xTrim(
+ ctx context.Context, key, strategy string,
+ approx bool, threshold interface{}, limit int64,
+) *IntCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xtrim", key, strategy)
+ if approx {
+ args = append(args, "~")
+ }
+ args = append(args, threshold)
+ if limit > 0 {
+ args = append(args, "limit", limit)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// XTrimMaxLen trims the stream to an exact length (no `~` modifier); `limit` cannot be used.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
+
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+ return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
+
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+ cmd := NewXInfoConsumersCmd(ctx, key, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+ cmd := NewXInfoGroupsCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+ cmd := NewXInfoStreamCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// XInfoStreamFull XINFO STREAM FULL [COUNT count]
+// redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+ args := make([]interface{}, 0, 6)
+ args = append(args, "xinfo", "stream", key, "full")
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewXInfoStreamFullCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/string_commands.go b/vendor/github.com/redis/go-redis/v9/string_commands.go
new file mode 100644
index 0000000..eff5880
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/string_commands.go
@@ -0,0 +1,303 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type StringCmdable interface {
+ Append(ctx context.Context, key, value string) *IntCmd
+ Decr(ctx context.Context, key string) *IntCmd
+ DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
+ Get(ctx context.Context, key string) *StringCmd
+ GetRange(ctx context.Context, key string, start, end int64) *StringCmd
+ GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+ GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
+ GetDel(ctx context.Context, key string) *StringCmd
+ Incr(ctx context.Context, key string) *IntCmd
+ IncrBy(ctx context.Context, key string, value int64) *IntCmd
+ IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
+ LCS(ctx context.Context, q *LCSQuery) *LCSCmd
+ MGet(ctx context.Context, keys ...string) *SliceCmd
+ MSet(ctx context.Context, values ...interface{}) *StatusCmd
+ MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
+ Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
+ SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
+ StrLen(ctx context.Context, key string) *IntCmd
+}
+
+func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "append", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "decr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "decrby", key, decrement)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Get Redis `GET key` command. It returns redis.Nil error when key does not exist.
+func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "get", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "getrange", key, start, end)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "getset", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist).
+// Requires Redis >= 6.2.0.
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
+ args := make([]interface{}, 0, 4)
+ args = append(args, "getex", key)
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == 0 {
+ args = append(args, "persist")
+ }
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
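+
+// Example (illustrative sketch; assumes a client rdb and a context ctx):
+//
+//	v, err := rdb.GetEx(ctx, "key", time.Minute).Result() // GETEX key EX 60
+//	v, err = rdb.GetEx(ctx, "key", 0).Result()            // GETEX key PERSIST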
+
+// GetDel redis-server version >= 6.2.0.
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "getdel", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "incr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "incrby", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LCS(ctx context.Context, q *LCSQuery) *LCSCmd {
+ cmd := NewLCSCmd(ctx, q)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "mget"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+// - MSet("key1", "value1", "key2", "value2")
+// - MSet([]string{"key1", "value1", "key2", "value2"})
+// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+// - MSet(struct); for struct types, see the HSet description.
+func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "mset"
+ args = appendArgs(args, values)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSetNX is like SetNX but accepts multiple values:
+// - MSetNX("key1", "value1", "key2", "value2")
+// - MSetNX([]string{"key1", "value1", "key2", "value2"})
+// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+// - MSetNX(struct); for struct types, see the HSet description.
+func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "msetnx"
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Set Redis `SET key value [expiration]` command.
+// Use expiration for `SETEx`-like behavior.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ args := make([]interface{}, 3, 5)
+ args[0] = "set"
+ args[1] = key
+ args[2] = value
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
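+
+// Usage sketch for the expiration handling above (illustrative; assumes `rdb`, `ctx`):
+//
+//	rdb.Set(ctx, "session", "tok", time.Minute)           // SET session tok ex 60
+//	rdb.Set(ctx, "session", "tok", 150*time.Millisecond)  // sub-second TTL uses px
+//	rdb.Set(ctx, "session", "tok", redis.KeepTTL)         // SET session tok keepttl
+//	rdb.Set(ctx, "session", "tok", 0)                     // no expiration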
+
+// SetArgs provides arguments for the SetArgs function.
+type SetArgs struct {
+ // Mode can be `NX` or `XX` or empty.
+ Mode string
+
+ // Zero `TTL` or `Expiration` means that the key has no expiration time.
+ TTL time.Duration
+ ExpireAt time.Time
+
+ // When Get is true, the command returns the old value stored at key, or nil when key did not exist.
+ Get bool
+
+ // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+ // otherwise you will receive an error: (error) ERR syntax error.
+ KeepTTL bool
+}
+
+// SetArgs supports all the options that the SET command supports.
+// It is the alternative to the Set function when you want
+// to have more control over the options.
+func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
+ args := []interface{}{"set", key, value}
+
+ if a.KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ if !a.ExpireAt.IsZero() {
+ args = append(args, "exat", a.ExpireAt.Unix())
+ }
+ if a.TTL > 0 {
+ if usePrecise(a.TTL) {
+ args = append(args, "px", formatMs(ctx, a.TTL))
+ } else {
+ args = append(args, "ex", formatSec(ctx, a.TTL))
+ }
+ }
+
+ if a.Mode != "" {
+ args = append(args, a.Mode)
+ }
+
+ if a.Get {
+ args = append(args, "get")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
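+
+// Illustrative sketch combining several options (assumes `rdb`, `ctx`). With
+// Mode "NX" and Get set, the returned StatusCmd carries the old value, or a
+// redis.Nil error if the key did not previously exist:
+//
+//	old, err := rdb.SetArgs(ctx, "key", "new", redis.SetArgs{
+//		Mode: "NX",
+//		TTL:  time.Minute,
+//		Get:  true,
+//	}).Result()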
+
+// SetEx Redis `SETEX key seconds value` command.
+func (c cmdable) SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetNX Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ // Use old `SETNX` to support old Redis versions.
+ cmd = NewBoolCmd(ctx, "setnx", key, value)
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
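+
+// A common lock-style usage sketch (illustrative; assumes `rdb`, `ctx`):
+//
+//	ok, err := rdb.SetNX(ctx, "lock:job", "owner-1", 30*time.Second).Result()
+//	if err == nil && ok {
+//		// the key was set; the lock is held until the TTL expires
+//	}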
+
+// SetXX Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ cmd = NewBoolCmd(ctx, "set", key, value, "xx")
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "setrange", key, offset, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "strlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/timeseries_commands.go b/vendor/github.com/redis/go-redis/v9/timeseries_commands.go
new file mode 100644
index 0000000..82d8cdf
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/timeseries_commands.go
@@ -0,0 +1,950 @@
+package redis
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+type TimeseriesCmdable interface {
+ TSAdd(ctx context.Context, key string, timestamp interface{}, value float64) *IntCmd
+ TSAddWithArgs(ctx context.Context, key string, timestamp interface{}, value float64, options *TSOptions) *IntCmd
+ TSCreate(ctx context.Context, key string) *StatusCmd
+ TSCreateWithArgs(ctx context.Context, key string, options *TSOptions) *StatusCmd
+ TSAlter(ctx context.Context, key string, options *TSAlterOptions) *StatusCmd
+ TSCreateRule(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int) *StatusCmd
+ TSCreateRuleWithArgs(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int, options *TSCreateRuleOptions) *StatusCmd
+ TSIncrBy(ctx context.Context, Key string, timestamp float64) *IntCmd
+ TSIncrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd
+ TSDecrBy(ctx context.Context, Key string, timestamp float64) *IntCmd
+ TSDecrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd
+ TSDel(ctx context.Context, Key string, fromTimestamp int, toTimestamp int) *IntCmd
+ TSDeleteRule(ctx context.Context, sourceKey string, destKey string) *StatusCmd
+ TSGet(ctx context.Context, key string) *TSTimestampValueCmd
+ TSGetWithArgs(ctx context.Context, key string, options *TSGetOptions) *TSTimestampValueCmd
+ TSInfo(ctx context.Context, key string) *MapStringInterfaceCmd
+ TSInfoWithArgs(ctx context.Context, key string, options *TSInfoOptions) *MapStringInterfaceCmd
+ TSMAdd(ctx context.Context, ktvSlices [][]interface{}) *IntSliceCmd
+ TSQueryIndex(ctx context.Context, filterExpr []string) *StringSliceCmd
+ TSRevRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd
+ TSRevRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRevRangeOptions) *TSTimestampValueSliceCmd
+ TSRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd
+ TSRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRangeOptions) *TSTimestampValueSliceCmd
+ TSMRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd
+ TSMRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRangeOptions) *MapStringSliceInterfaceCmd
+ TSMRevRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd
+ TSMRevRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRevRangeOptions) *MapStringSliceInterfaceCmd
+ TSMGet(ctx context.Context, filters []string) *MapStringSliceInterfaceCmd
+ TSMGetWithArgs(ctx context.Context, filters []string, options *TSMGetOptions) *MapStringSliceInterfaceCmd
+}
+
+type TSOptions struct {
+ Retention int
+ ChunkSize int
+ Encoding string
+ DuplicatePolicy string
+ Labels map[string]string
+ IgnoreMaxTimeDiff int64
+ IgnoreMaxValDiff float64
+}
+type TSIncrDecrOptions struct {
+ Timestamp int64
+ Retention int
+ ChunkSize int
+ Uncompressed bool
+ DuplicatePolicy string
+ Labels map[string]string
+ IgnoreMaxTimeDiff int64
+ IgnoreMaxValDiff float64
+}
+
+type TSAlterOptions struct {
+ Retention int
+ ChunkSize int
+ DuplicatePolicy string
+ Labels map[string]string
+ IgnoreMaxTimeDiff int64
+ IgnoreMaxValDiff float64
+}
+
+type TSCreateRuleOptions struct {
+ alignTimestamp int64
+}
+
+type TSGetOptions struct {
+ Latest bool
+}
+
+type TSInfoOptions struct {
+ Debug bool
+}
+type Aggregator int
+
+const (
+ Invalid = Aggregator(iota)
+ Avg
+ Sum
+ Min
+ Max
+ Range
+ Count
+ First
+ Last
+ StdP
+ StdS
+ VarP
+ VarS
+ Twa
+)
+
+func (a Aggregator) String() string {
+ switch a {
+ case Invalid:
+ return ""
+ case Avg:
+ return "AVG"
+ case Sum:
+ return "SUM"
+ case Min:
+ return "MIN"
+ case Max:
+ return "MAX"
+ case Range:
+ return "RANGE"
+ case Count:
+ return "COUNT"
+ case First:
+ return "FIRST"
+ case Last:
+ return "LAST"
+ case StdP:
+ return "STD.P"
+ case StdS:
+ return "STD.S"
+ case VarP:
+ return "VAR.P"
+ case VarS:
+ return "VAR.S"
+ case Twa:
+ return "TWA"
+ default:
+ return ""
+ }
+}
+
+type TSRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+}
+
+type TSRevRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+}
+
+type TSMRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ WithLabels bool
+ SelectedLabels []interface{}
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+ GroupByLabel interface{}
+ Reducer interface{}
+}
+
+type TSMRevRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ WithLabels bool
+ SelectedLabels []interface{}
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+ GroupByLabel interface{}
+ Reducer interface{}
+}
+
+type TSMGetOptions struct {
+ Latest bool
+ WithLabels bool
+ SelectedLabels []interface{}
+}
+
+// TSAdd - Adds a sample to a time series.
+// For more information - https://redis.io/commands/ts.add/
+func (c cmdable) TSAdd(ctx context.Context, key string, timestamp interface{}, value float64) *IntCmd {
+ args := []interface{}{"TS.ADD", key, timestamp, value}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSAddWithArgs - Adds a sample to a time series with additional options.
+// This function also allows for specifying additional options such as:
+// Retention, ChunkSize, Encoding, DuplicatePolicy and Labels.
+// For more information - https://redis.io/commands/ts.add/
+func (c cmdable) TSAddWithArgs(ctx context.Context, key string, timestamp interface{}, value float64, options *TSOptions) *IntCmd {
+ args := []interface{}{"TS.ADD", key, timestamp, value}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Encoding != "" {
+ args = append(args, "ENCODING", options.Encoding)
+ }
+
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 {
+ args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff)
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
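+
+// Illustrative sketch (assumes `rdb`, `ctx`; "*" asks the server to use its
+// current time as the sample timestamp):
+//
+//	ts, err := rdb.TSAddWithArgs(ctx, "sensor:1:temp", "*", 23.5, &redis.TSOptions{
+//		Retention: 86400000, // milliseconds
+//		Labels:    map[string]string{"room": "kitchen"},
+//	}).Result()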
+
+// TSCreate - Creates a new time-series key.
+// For more information - https://redis.io/commands/ts.create/
+func (c cmdable) TSCreate(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TS.CREATE", key}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateWithArgs - Creates a new time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Retention, ChunkSize, Encoding, DuplicatePolicy and Labels.
+// For more information - https://redis.io/commands/ts.create/
+func (c cmdable) TSCreateWithArgs(ctx context.Context, key string, options *TSOptions) *StatusCmd {
+ args := []interface{}{"TS.CREATE", key}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Encoding != "" {
+ args = append(args, "ENCODING", options.Encoding)
+ }
+
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 {
+ args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff)
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSAlter - Alters an existing time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Retention, ChunkSize and DuplicatePolicy.
+// For more information - https://redis.io/commands/ts.alter/
+func (c cmdable) TSAlter(ctx context.Context, key string, options *TSAlterOptions) *StatusCmd {
+ args := []interface{}{"TS.ALTER", key}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 {
+ args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff)
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateRule - Creates a compaction rule from sourceKey to destKey.
+// For more information - https://redis.io/commands/ts.createrule/
+func (c cmdable) TSCreateRule(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int) *StatusCmd {
+ args := []interface{}{"TS.CREATERULE", sourceKey, destKey, "AGGREGATION", aggregator.String(), bucketDuration}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateRuleWithArgs - Creates a compaction rule from sourceKey to destKey
+// with an additional option: alignTimestamp.
+// For more information - https://redis.io/commands/ts.createrule/
+func (c cmdable) TSCreateRuleWithArgs(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int, options *TSCreateRuleOptions) *StatusCmd {
+ args := []interface{}{"TS.CREATERULE", sourceKey, destKey, "AGGREGATION", aggregator.String(), bucketDuration}
+ if options != nil {
+ if options.alignTimestamp != 0 {
+ args = append(args, options.alignTimestamp)
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
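+
+// Illustrative sketch: downsample sensor:1:temp into one-minute averages
+// (assumes `rdb`, `ctx`; both time-series keys must already exist):
+//
+//	err := rdb.TSCreateRule(ctx, "sensor:1:temp", "sensor:1:temp:avg1m",
+//		redis.Avg, 60000).Err()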
+
+// TSIncrBy - Increments the latest sample value of a time-series key by the
+// given addend (note: the addend is passed via the `timestamp` parameter).
+// For more information - https://redis.io/commands/ts.incrby/
+func (c cmdable) TSIncrBy(ctx context.Context, Key string, timestamp float64) *IntCmd {
+ args := []interface{}{"TS.INCRBY", Key, timestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSIncrByWithArgs - Increments the latest sample value of a time-series key by the given addend, with additional options.
+// This function allows for specifying additional options such as:
+// Timestamp, Retention, ChunkSize, Uncompressed and Labels.
+// For more information - https://redis.io/commands/ts.incrby/
+func (c cmdable) TSIncrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd {
+ args := []interface{}{"TS.INCRBY", key, timestamp}
+ if options != nil {
+ if options.Timestamp != 0 {
+ args = append(args, "TIMESTAMP", options.Timestamp)
+ }
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Uncompressed {
+ args = append(args, "UNCOMPRESSED")
+ }
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 {
+ args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff)
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
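+
+// Illustrative counter sketch (assumes `rdb`, `ctx`); the addend is the second
+// argument, and TIMESTAMP defaults to the server clock when unset:
+//
+//	_, err := rdb.TSIncrByWithArgs(ctx, "requests", 1, &redis.TSIncrDecrOptions{
+//		Labels: map[string]string{"service": "api"},
+//	}).Result()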
+
+// TSDecrBy - Decrements the latest sample value of a time-series key by the
+// given subtrahend (note: it is passed via the `timestamp` parameter).
+// For more information - https://redis.io/commands/ts.decrby/
+func (c cmdable) TSDecrBy(ctx context.Context, Key string, timestamp float64) *IntCmd {
+ args := []interface{}{"TS.DECRBY", Key, timestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDecrByWithArgs - Decrements the latest sample value of a time-series key by the given subtrahend, with additional options.
+// This function allows for specifying additional options such as:
+// Timestamp, Retention, ChunkSize, Uncompressed and Labels.
+// For more information - https://redis.io/commands/ts.decrby/
+func (c cmdable) TSDecrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd {
+ args := []interface{}{"TS.DECRBY", key, timestamp}
+ if options != nil {
+ if options.Timestamp != 0 {
+ args = append(args, "TIMESTAMP", options.Timestamp)
+ }
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Uncompressed {
+ args = append(args, "UNCOMPRESSED")
+ }
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 {
+ args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff)
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDel - Deletes a range of samples from a time-series key.
+// For more information - https://redis.io/commands/ts.del/
+func (c cmdable) TSDel(ctx context.Context, Key string, fromTimestamp int, toTimestamp int) *IntCmd {
+ args := []interface{}{"TS.DEL", Key, fromTimestamp, toTimestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDeleteRule - Deletes a compaction rule from sourceKey to destKey.
+// For more information - https://redis.io/commands/ts.deleterule/
+func (c cmdable) TSDeleteRule(ctx context.Context, sourceKey string, destKey string) *StatusCmd {
+ args := []interface{}{"TS.DELETERULE", sourceKey, destKey}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSGetWithArgs - Gets the last sample of a time-series key with an additional
+// option: Latest.
+// For more information - https://redis.io/commands/ts.get/
+func (c cmdable) TSGetWithArgs(ctx context.Context, key string, options *TSGetOptions) *TSTimestampValueCmd {
+ args := []interface{}{"TS.GET", key}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ }
+ cmd := newTSTimestampValueCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSGet - Gets the last sample of a time-series key.
+// For more information - https://redis.io/commands/ts.get/
+func (c cmdable) TSGet(ctx context.Context, key string) *TSTimestampValueCmd {
+ args := []interface{}{"TS.GET", key}
+ cmd := newTSTimestampValueCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TSTimestampValue struct {
+ Timestamp int64
+ Value float64
+}
+type TSTimestampValueCmd struct {
+ baseCmd
+ val TSTimestampValue
+}
+
+func newTSTimestampValueCmd(ctx context.Context, args ...interface{}) *TSTimestampValueCmd {
+ return &TSTimestampValueCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TSTimestampValueCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TSTimestampValueCmd) SetVal(val TSTimestampValue) {
+ cmd.val = val
+}
+
+func (cmd *TSTimestampValueCmd) Result() (TSTimestampValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TSTimestampValueCmd) Val() TSTimestampValue {
+ return cmd.val
+}
+
+func (cmd *TSTimestampValueCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = TSTimestampValue{}
+ for i := 0; i < n; i++ {
+ timestamp, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val.Timestamp = timestamp
+ cmd.val.Value, err = strconv.ParseFloat(value, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TSInfo - Returns information about a time-series key.
+// For more information - https://redis.io/commands/ts.info/
+func (c cmdable) TSInfo(ctx context.Context, key string) *MapStringInterfaceCmd {
+ args := []interface{}{"TS.INFO", key}
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSInfoWithArgs - Returns information about a time-series key with an
+// additional option: Debug.
+// For more information - https://redis.io/commands/ts.info/
+func (c cmdable) TSInfoWithArgs(ctx context.Context, key string, options *TSInfoOptions) *MapStringInterfaceCmd {
+ args := []interface{}{"TS.INFO", key}
+ if options != nil {
+ if options.Debug {
+ args = append(args, "DEBUG")
+ }
+ }
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMAdd - Adds multiple samples to multiple time-series keys.
+// It accepts a slice of 'ktv' slices, each containing exactly three elements:
+// key, timestamp, and value. Each inner slice must follow this shape for the
+// command to work.
+// For more information - https://redis.io/commands/ts.madd/
+func (c cmdable) TSMAdd(ctx context.Context, ktvSlices [][]interface{}) *IntSliceCmd {
+ args := []interface{}{"TS.MADD"}
+ for _, ktv := range ktvSlices {
+ args = append(args, ktv...)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
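+
+// Illustrative sketch of the expected ktv shape (assumes `rdb`, `ctx`;
+// timestamps are milliseconds):
+//
+//	timestamps, err := rdb.TSMAdd(ctx, [][]interface{}{
+//		{"sensor:1:temp", 1700000000000, 23.5},
+//		{"sensor:2:temp", 1700000000000, 19.2},
+//	}).Result()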
+
+// TSQueryIndex - Returns all the keys matching the filter expression.
+// For more information - https://redis.io/commands/ts.queryindex/
+func (c cmdable) TSQueryIndex(ctx context.Context, filterExpr []string) *StringSliceCmd {
+ args := []interface{}{"TS.QUERYINDEX"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRevRange - Returns a range of samples from a time-series key in reverse order.
+// For more information - https://redis.io/commands/ts.revrange/
+func (c cmdable) TSRevRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.REVRANGE", key, fromTimestamp, toTimestamp}
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRevRangeWithArgs - Returns a range of samples from a time-series key in reverse order with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, Count, Align, Aggregator,
+// BucketDuration, BucketTimestamp and Empty.
+// For more information - https://redis.io/commands/ts.revrange/
+func (c cmdable) TSRevRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRevRangeOptions) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.REVRANGE", key, fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRange - Returns a range of samples from a time-series key.
+// For more information - https://redis.io/commands/ts.range/
+func (c cmdable) TSRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.RANGE", key, fromTimestamp, toTimestamp}
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRangeWithArgs - Returns a range of samples from a time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, Count, Align, Aggregator,
+// BucketDuration, BucketTimestamp and Empty.
+// For more information - https://redis.io/commands/ts.range/
+func (c cmdable) TSRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRangeOptions) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.RANGE", key, fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
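+
+// Illustrative sketch: one-second averages over a window (assumes `rdb`,
+// `ctx`; timestamps are milliseconds):
+//
+//	samples, err := rdb.TSRangeWithArgs(ctx, "sensor:1:temp", 0, 1700000000000,
+//		&redis.TSRangeOptions{
+//			Aggregator:     redis.Avg,
+//			BucketDuration: 1000,
+//		}).Result()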
+
+type TSTimestampValueSliceCmd struct {
+ baseCmd
+ val []TSTimestampValue
+}
+
+func newTSTimestampValueSliceCmd(ctx context.Context, args ...interface{}) *TSTimestampValueSliceCmd {
+ return &TSTimestampValueSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TSTimestampValueSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TSTimestampValueSliceCmd) SetVal(val []TSTimestampValue) {
+ cmd.val = val
+}
+
+func (cmd *TSTimestampValueSliceCmd) Result() ([]TSTimestampValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TSTimestampValueSliceCmd) Val() []TSTimestampValue {
+ return cmd.val
+}
+
+func (cmd *TSTimestampValueSliceCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]TSTimestampValue, n)
+ for i := 0; i < n; i++ {
+ _, _ = rd.ReadArrayLen()
+ timestamp, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Timestamp = timestamp
+ cmd.val[i].Value, err = strconv.ParseFloat(value, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TSMRange - Returns a range of samples from multiple time-series keys.
+// For more information - https://redis.io/commands/ts.mrange/
+func (c cmdable) TSMRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MRANGE", fromTimestamp, toTimestamp, "FILTER"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRangeWithArgs - Returns a range of samples from multiple time-series keys with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, WithLabels, SelectedLabels,
+// Count, Align, Aggregator, BucketDuration, BucketTimestamp,
+// Empty, GroupByLabel and Reducer.
+// For more information - https://redis.io/commands/ts.mrange/
+func (c cmdable) TSMRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRangeOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MRANGE", fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ if options != nil {
+ if options.GroupByLabel != nil {
+ args = append(args, "GROUPBY", options.GroupByLabel)
+ }
+ if options.Reducer != nil {
+ args = append(args, "REDUCE", options.Reducer)
+ }
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
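+
+// Illustrative sketch: per-room averages across matching series (assumes
+// `rdb`, `ctx`; GROUPBY/REDUCE are appended after FILTER, as built above):
+//
+//	res, err := rdb.TSMRangeWithArgs(ctx, 0, 1700000000000,
+//		[]string{"room=kitchen"}, &redis.TSMRangeOptions{
+//			WithLabels:   true,
+//			GroupByLabel: "room",
+//			Reducer:      "avg",
+//		}).Result()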
+
+// TSMRevRange - Returns a range of samples from multiple time-series keys in reverse order.
+// For more information - https://redis.io/commands/ts.mrevrange/
+func (c cmdable) TSMRevRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MREVRANGE", fromTimestamp, toTimestamp, "FILTER"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRevRangeWithArgs - Returns a range of samples from multiple time-series keys in reverse order with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, WithLabels, SelectedLabels,
+// Count, Align, Aggregator, BucketDuration, BucketTimestamp,
+// Empty, GroupByLabel and Reducer.
+// For more information - https://redis.io/commands/ts.mrevrange/
+func (c cmdable) TSMRevRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRevRangeOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MREVRANGE", fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ if options != nil {
+ if options.GroupByLabel != nil {
+ args = append(args, "GROUPBY", options.GroupByLabel)
+ }
+ if options.Reducer != nil {
+ args = append(args, "REDUCE", options.Reducer)
+ }
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMGet - Returns the last sample of multiple time-series keys.
+// For more information - https://redis.io/commands/ts.mget/
+func (c cmdable) TSMGet(ctx context.Context, filters []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MGET", "FILTER"}
+ for _, f := range filters {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMGetWithArgs - Returns the last sample of multiple time-series keys with additional options.
+// This function allows for specifying additional options such as:
+// Latest, WithLabels and SelectedLabels.
+// For more information - https://redis.io/commands/ts.mget/
+func (c cmdable) TSMGetWithArgs(ctx context.Context, filters []string, options *TSMGetOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MGET"}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filters {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/tx.go b/vendor/github.com/redis/go-redis/v9/tx.go
new file mode 100644
index 0000000..039eaf3
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/tx.go
@@ -0,0 +1,151 @@
+package redis
+
+import (
+ "context"
+
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+// TxFailedErr is returned when a Redis transaction failed.
+const TxFailedErr = proto.RedisError("redis: transaction failed")
+
+// Tx implements Redis transactions as described in
+// http://redis.io/topics/transactions. It's NOT safe for concurrent use
+// by multiple goroutines, because Exec resets list of watched keys.
+//
+// If you don't need WATCH, use Pipeline instead.
+type Tx struct {
+ baseClient
+ cmdable
+ statefulCmdable
+ hooksMixin
+}
+
+func (c *Client) newTx() *Tx {
+ tx := Tx{
+ baseClient: baseClient{
+ opt: c.opt,
+ connPool: pool.NewStickyConnPool(c.connPool),
+ },
+ hooksMixin: c.hooksMixin.clone(),
+ }
+ tx.init()
+ return &tx
+}
+
+func (c *Tx) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
+}
+
+func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+// Watch prepares a transaction and marks the keys to be watched
+// for conditional execution if there are any keys.
+//
+// The transaction is automatically closed when fn exits.
+func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ tx := c.newTx()
+ defer tx.Close(ctx)
+ if len(keys) > 0 {
+ if err := tx.Watch(ctx, keys...).Err(); err != nil {
+ return err
+ }
+ }
+ return fn(tx)
+}
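+
+// The canonical optimistic-locking sketch (illustrative; assumes an
+// initialized *Client `rdb` and a `ctx`): watch the key, read it, then queue
+// the write in a MULTI/EXEC pipeline; Exec fails with TxFailedErr if
+// "counter" changed in the meantime, in which case the caller can retry.
+//
+//	err := rdb.Watch(ctx, func(tx *Tx) error {
+//		n, err := tx.Get(ctx, "counter").Int()
+//		if err != nil && err != Nil {
+//			return err
+//		}
+//		_, err = tx.TxPipelined(ctx, func(pipe Pipeliner) error {
+//			pipe.Set(ctx, "counter", n+1, 0)
+//			return nil
+//		})
+//		return err
+//	}, "counter")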
+
+// Close closes the transaction, releasing any open resources.
+func (c *Tx) Close(ctx context.Context) error {
+ _ = c.Unwatch(ctx).Err()
+ return c.baseClient.Close()
+}
+
+// Watch marks the keys to be watched for conditional execution
+// of a transaction.
+func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "watch"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Unwatch flushes all the previously watched keys for a transaction.
+func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unwatch"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
+func (c *Tx) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ return c.processPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+// Pipelined executes commands queued in the fn outside of the transaction.
+// Use TxPipelined if you need transactional behavior.
+func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+// TxPipelined executes commands queued in the fn in the transaction.
+//
+// When using WATCH, EXEC will execute commands only if the watched keys
+// were not modified, allowing for a check-and-set mechanism.
+//
+// Exec always returns list of commands. If transaction fails
+// TxFailedErr is returned. Otherwise Exec returns an error of the first
+// failed command or nil.
+func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
+func (c *Tx) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
+ if len(cmds) == 0 {
+ panic("not reached")
+ }
+ cmdsCopy := make([]Cmder, len(cmds)+2)
+ cmdsCopy[0] = NewStatusCmd(ctx, "multi")
+ copy(cmdsCopy[1:], cmds)
+ cmdsCopy[len(cmdsCopy)-1] = NewSliceCmd(ctx, "exec")
+ return cmdsCopy
+}
diff --git a/vendor/github.com/redis/go-redis/v9/universal.go b/vendor/github.com/redis/go-redis/v9/universal.go
new file mode 100644
index 0000000..47fda27
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/universal.go
@@ -0,0 +1,251 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "time"
+)
+
+// UniversalOptions information is required by UniversalClient to establish
+// connections.
+type UniversalOptions struct {
+ // Either a single address or a seed list of host:port addresses
+ // of cluster/sentinel nodes.
+ Addrs []string
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+ // Database to be selected after connecting to the server.
+ // Only single-node and failover clients.
+ DB int
+
+ // Common options.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Protocol int
+ Username string
+ Password string
+ SentinelUsername string
+ SentinelPassword string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
+
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+
+ TLSConfig *tls.Config
+
+ // Only cluster clients.
+
+ MaxRedirects int
+ ReadOnly bool
+ RouteByLatency bool
+ RouteRandomly bool
+
+ // The sentinel master name.
+ // Only failover clients.
+
+ MasterName string
+
+ DisableIndentity bool
+ IdentitySuffix string
+ UnstableResp3 bool
+}
+
+// Cluster returns cluster options created from the universal options.
+func (o *UniversalOptions) Cluster() *ClusterOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:6379"}
+ }
+
+ return &ClusterOptions{
+ Addrs: o.Addrs,
+ ClientName: o.ClientName,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ Protocol: o.Protocol,
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRedirects: o.MaxRedirects,
+ ReadOnly: o.ReadOnly,
+ RouteByLatency: o.RouteByLatency,
+ RouteRandomly: o.RouteRandomly,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
+
+ PoolFIFO: o.PoolFIFO,
+
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
+
+ TLSConfig: o.TLSConfig,
+
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
+ UnstableResp3: o.UnstableResp3,
+ }
+}
+
+// Failover returns failover options created from the universal options.
+func (o *UniversalOptions) Failover() *FailoverOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:26379"}
+ }
+
+ return &FailoverOptions{
+ SentinelAddrs: o.Addrs,
+ MasterName: o.MasterName,
+ ClientName: o.ClientName,
+
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Protocol: o.Protocol,
+ Username: o.Username,
+ Password: o.Password,
+ SentinelUsername: o.SentinelUsername,
+ SentinelPassword: o.SentinelPassword,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
+
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
+
+ TLSConfig: o.TLSConfig,
+
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
+ UnstableResp3: o.UnstableResp3,
+ }
+}
+
+// Simple returns basic options created from the universal options.
+func (o *UniversalOptions) Simple() *Options {
+ addr := "127.0.0.1:6379"
+ if len(o.Addrs) > 0 {
+ addr = o.Addrs[0]
+ }
+
+ return &Options{
+ Addr: addr,
+ ClientName: o.ClientName,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Protocol: o.Protocol,
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
+
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
+
+ TLSConfig: o.TLSConfig,
+
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
+ UnstableResp3: o.UnstableResp3,
+ }
+}
+
+// --------------------------------------------------------------------
+
+// UniversalClient is an abstract client which - based on the provided options -
+// represents either a ClusterClient, a FailoverClient, or a single-node Client.
+// This can be useful for testing cluster-specific applications locally or having different
+// clients in different environments.
+type UniversalClient interface {
+ Cmdable
+ AddHook(Hook)
+ Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
+ Do(ctx context.Context, args ...interface{}) *Cmd
+ Process(ctx context.Context, cmd Cmder) error
+ Subscribe(ctx context.Context, channels ...string) *PubSub
+ PSubscribe(ctx context.Context, channels ...string) *PubSub
+ SSubscribe(ctx context.Context, channels ...string) *PubSub
+ Close() error
+ PoolStats() *PoolStats
+}
+
+var (
+ _ UniversalClient = (*Client)(nil)
+ _ UniversalClient = (*ClusterClient)(nil)
+ _ UniversalClient = (*Ring)(nil)
+)
+
+// NewUniversalClient returns a new multi client. The type of the returned client depends
+// on the following conditions:
+//
+// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned.
+// 2. If the number of Addrs is two or more, a ClusterClient is returned.
+// 3. Otherwise, a single-node Client is returned.
+func NewUniversalClient(opts *UniversalOptions) UniversalClient {
+ if opts.MasterName != "" {
+ return NewFailoverClient(opts.Failover())
+ } else if len(opts.Addrs) > 1 {
+ return NewClusterClient(opts.Cluster())
+ }
+ return NewClient(opts.Simple())
+}
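+
+// Illustrative sketches of the three branches (addresses are placeholders):
+//
+//	NewUniversalClient(&UniversalOptions{Addrs: []string{":6379"}})                          // single-node *Client
+//	NewUniversalClient(&UniversalOptions{Addrs: []string{":7000", ":7001", ":7002"}})        // *ClusterClient
+//	NewUniversalClient(&UniversalOptions{MasterName: "mymaster", Addrs: []string{":26379"}}) // failover client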
diff --git a/vendor/github.com/redis/go-redis/v9/version.go b/vendor/github.com/redis/go-redis/v9/version.go
new file mode 100644
index 0000000..a447a54
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/version.go
@@ -0,0 +1,6 @@
+package redis
+
+// Version is the current release version.
+func Version() string {
+ return "9.7.1"
+}
diff --git a/vendor/github.com/sagikazarmark/locafero/.editorconfig b/vendor/github.com/sagikazarmark/locafero/.editorconfig
new file mode 100644
index 0000000..6f944f5
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/locafero/.editorconfig
@@ -0,0 +1,21 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[{Makefile,*.mk}]
+indent_style = tab
+
+[*.nix]
+indent_size = 2
+
+[*.go]
+indent_style = tab
+
+[{*.yml,*.yaml}]
+indent_size = 2
diff --git a/vendor/github.com/sagikazarmark/locafero/.envrc b/vendor/github.com/sagikazarmark/locafero/.envrc
new file mode 100644
index 0000000..3ce7171
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/locafero/.envrc
@@ -0,0 +1,4 @@
+if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then
+ source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8="
+fi
+use flake . --impure
diff --git a/vendor/github.com/sagikazarmark/locafero/.gitignore b/vendor/github.com/sagikazarmark/locafero/.gitignore
new file mode 100644
index 0000000..8f07e60
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/locafero/.gitignore
@@ -0,0 +1,8 @@
+/.devenv/
+/.direnv/
+/.task/
+/bin/
+/build/
+/tmp/
+/var/
+/vendor/
diff --git a/vendor/github.com/sagikazarmark/locafero/.golangci.yaml b/vendor/github.com/sagikazarmark/locafero/.golangci.yaml
new file mode 100644
index 0000000..829de2a
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/locafero/.golangci.yaml
@@ -0,0 +1,27 @@
+run:
+ timeout: 10m
+
+linters-settings:
+ gci:
+ sections:
+ - standard
+ - default
+ - prefix(github.com/sagikazarmark/locafero)
+ goimports:
+ local-prefixes: github.com/sagikazarmark/locafero
+ misspell:
+ locale: US
+ nolintlint:
+ allow-leading-space: false # require machine-readable nolint directives (with no leading space)
+ allow-unused: false # report any unused nolint directives
+ require-specific: false # don't require nolint directives to be specific about which linter is being skipped
+ revive:
+ confidence: 0
+
+linters:
+ enable:
+ - gci
+ - goimports
+ - misspell
+ - nolintlint
+ - revive
diff --git a/vendor/github.com/sagikazarmark/locafero/LICENSE b/vendor/github.com/sagikazarmark/locafero/LICENSE
new file mode 100644
index 0000000..a70b0f2
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/locafero/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2023 Márk Sági-Kazár
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/sagikazarmark/locafero/README.md b/vendor/github.com/sagikazarmark/locafero/README.md
new file mode 100644
index 0000000..a48e8e9
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/locafero/README.md
@@ -0,0 +1,37 @@
+# Finder library for [Afero](https://github.com/spf13/afero)
+
+[CI](https://github.com/sagikazarmark/locafero/actions/workflows/ci.yaml)
+[Go Reference](https://pkg.go.dev/mod/github.com/sagikazarmark/locafero)
+
+[Built with Nix](https://builtwithnix.org)
+
+**Finder library for [Afero](https://github.com/spf13/afero) ported from [go-finder](https://github.com/sagikazarmark/go-finder).**
+
+> [!WARNING]
+> This is an experimental library under development.
+>
+> **Backwards compatibility is not guaranteed, expect breaking changes.**
+
+## Installation
+
+```shell
+go get github.com/sagikazarmark/locafero
+```
+
+## Usage
+
+Check out the [package example](https://pkg.go.dev/github.com/sagikazarmark/locafero#example-package) on go.dev.
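+
+A minimal sketch of what a search looks like (illustrative, based on the
+`Finder` type's exported fields; the filesystem and paths are placeholders):
+
+```go
+finder := locafero.Finder{
+	Paths: []string{"/etc", "/home/user"},
+	Names: []string{"config.*"},
+	Type:  locafero.FileTypeFile,
+}
+
+results, err := finder.Find(afero.NewOsFs())
+```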
+
+## Development
+
+**For an optimal developer experience, it is recommended to install [Nix](https://nixos.org/download.html) and [direnv](https://direnv.net/docs/installation.html).**
+
+Run the test suite:
+
+```shell
+just test
+```
+
+## License
+
+The project is licensed under the [MIT License](LICENSE).
diff --git a/vendor/github.com/sagikazarmark/locafero/file_type.go b/vendor/github.com/sagikazarmark/locafero/file_type.go
new file mode 100644
index 0000000..9a9b140
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/locafero/file_type.go
@@ -0,0 +1,28 @@
+package locafero
+
+import "io/fs"
+
+// FileType represents the kind of entries [Finder] can return.
+type FileType int
+
+const (
+ FileTypeAll FileType = iota
+ FileTypeFile
+ FileTypeDir
+)
+
+func (ft FileType) matchFileInfo(info fs.FileInfo) bool {
+ switch ft {
+ case FileTypeAll:
+ return true
+
+ case FileTypeFile:
+ return !info.IsDir()
+
+ case FileTypeDir:
+ return info.IsDir()
+
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/sagikazarmark/locafero/finder.go b/vendor/github.com/sagikazarmark/locafero/finder.go
new file mode 100644
index 0000000..754c8b2
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/locafero/finder.go
@@ -0,0 +1,165 @@
+// Package locafero looks for files and directories in an [afero.Fs] filesystem.
+package locafero
+
+import (
+ "errors"
+ "io/fs"
+ "path/filepath"
+ "strings"
+
+ "github.com/sourcegraph/conc/iter"
+ "github.com/spf13/afero"
+)
+
+// Finder looks for files and directories in an [afero.Fs] filesystem.
+type Finder struct {
+ // Paths represents a list of locations that the [Finder] will search in.
+ //
+ // They are essentially the root directories or starting points for the search.
+ //
+ // Examples:
+ // - home/user
+ // - etc
+ Paths []string
+
+ // Names are specific entries that the [Finder] will look for within the given Paths.
+ //
+ // It provides the capability to search for entries with depth,
+ // meaning it can target deeper locations within the directory structure.
+ //
+ // It also supports glob syntax (as defined by [filepath.Match]), offering greater flexibility in search patterns.
+ //
+ // Examples:
+ // - config.yaml
+ // - home/*/config.yaml
+ // - home/*/config.*
+ Names []string
+
+ // Type restricts the kind of entries returned by the [Finder].
+ //
+ // This parameter helps in differentiating and filtering out files from directories or vice versa.
+ Type FileType
+}
+
+// Find looks for files and directories in an [afero.Fs] filesystem.
+func (f Finder) Find(fsys afero.Fs) ([]string, error) {
+ // Arbitrary go routine limit (TODO: make this a parameter)
+ // pool := pool.NewWithResults[[]string]().WithMaxGoroutines(5).WithErrors().WithFirstError()
+
+ type searchItem struct {
+ path string
+ name string
+ }
+
+ var searchItems []searchItem
+
+ for _, searchPath := range f.Paths {
+ searchPath := searchPath
+
+ for _, searchName := range f.Names {
+ searchName := searchName
+
+ searchItems = append(searchItems, searchItem{searchPath, searchName})
+
+ // pool.Go(func() ([]string, error) {
+ // // If the name contains any glob character, perform a glob match
+ // if strings.ContainsAny(searchName, "*?[]\\^") {
+ // return globWalkSearch(fsys, searchPath, searchName, f.Type)
+ // }
+ //
+ // return statSearch(fsys, searchPath, searchName, f.Type)
+ // })
+ }
+ }
+
+ // allResults, err := pool.Wait()
+ // if err != nil {
+ // return nil, err
+ // }
+
+ allResults, err := iter.MapErr(searchItems, func(item *searchItem) ([]string, error) {
+ // If the name contains any glob character, perform a glob match
+ if strings.ContainsAny(item.name, "*?[]\\^") {
+ return globWalkSearch(fsys, item.path, item.name, f.Type)
+ }
+
+ return statSearch(fsys, item.path, item.name, f.Type)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ var results []string
+
+ for _, r := range allResults {
+ results = append(results, r...)
+ }
+
+ // Sort results in alphabetical order for now
+ // sort.Strings(results)
+
+ return results, nil
+}
+
+func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) {
+ var results []string
+
+ err := afero.Walk(fsys, searchPath, func(p string, fileInfo fs.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Skip the root path
+ if p == searchPath {
+ return nil
+ }
+
+ var result error
+
+ // Stop reading subdirectories
+ // TODO: add depth detection here
+ if fileInfo.IsDir() && filepath.Dir(p) == searchPath {
+ result = fs.SkipDir
+ }
+
+ // Skip entries whose type does not match
+ if !searchType.matchFileInfo(fileInfo) {
+ return result
+ }
+
+ match, err := filepath.Match(searchName, fileInfo.Name())
+ if err != nil {
+ return err
+ }
+
+ if match {
+ results = append(results, p)
+ }
+
+ return result
+ })
+ if err != nil {
+ return results, err
+ }
+
+ return results, nil
+}
+
+func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) {
+ filePath := filepath.Join(searchPath, searchName)
+
+ fileInfo, err := fsys.Stat(filePath)
+ if errors.Is(err, fs.ErrNotExist) {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Skip entries whose type does not match
+ if !searchType.matchFileInfo(fileInfo) {
+ return nil, nil
+ }
+
+ return []string{filePath}, nil
+}
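To make the search flow above concrete, here is a minimal, hypothetical usage sketch (not part of the vendored sources; it assumes locafero exports a `FileTypeFile` constant for the `FileType` field):

```go
package main

import (
	"fmt"
	"log"

	"github.com/sagikazarmark/locafero"
	"github.com/spf13/afero"
)

func main() {
	// Populate an in-memory filesystem so the example is self-contained.
	fsys := afero.NewMemMapFs()
	_ = afero.WriteFile(fsys, "etc/app/config.yaml", []byte("key: value"), 0o644)

	finder := locafero.Finder{
		Paths: []string{"etc/app"},
		Names: []string{"config.*"},  // contains '*', so Find takes the glob-walk path
		Type:  locafero.FileTypeFile, // assumed constant restricting results to files
	}

	results, err := finder.Find(fsys)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(results) // [etc/app/config.yaml]
}
```

Because `config.*` contains a glob character, `Find` dispatches to `globWalkSearch`; a literal name like `config.yaml` would take the cheaper `statSearch` path.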
diff --git a/vendor/github.com/sagikazarmark/locafero/flake.lock b/vendor/github.com/sagikazarmark/locafero/flake.lock
new file mode 100644
index 0000000..46d28f8
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/locafero/flake.lock
@@ -0,0 +1,273 @@
+{
+ "nodes": {
+ "devenv": {
+ "inputs": {
+ "flake-compat": "flake-compat",
+ "nix": "nix",
+ "nixpkgs": "nixpkgs",
+ "pre-commit-hooks": "pre-commit-hooks"
+ },
+ "locked": {
+ "lastModified": 1694097209,
+ "narHash": "sha256-gQmBjjxeSyySjbh0yQVBKApo2KWIFqqbRUvG+Fa+QpM=",
+ "owner": "cachix",
+ "repo": "devenv",
+ "rev": "7a8e6a91510efe89d8dcb8e43233f93e86f6b189",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "devenv",
+ "type": "github"
+ }
+ },
+ "flake-compat": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1673956053,
+ "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
+ "type": "github"
+ },
+ "original": {
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "type": "github"
+ }
+ },
+ "flake-parts": {
+ "inputs": {
+ "nixpkgs-lib": "nixpkgs-lib"
+ },
+ "locked": {
+ "lastModified": 1693611461,
+ "narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=",
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "type": "github"
+ }
+ },
+ "flake-utils": {
+ "inputs": {
+ "systems": "systems"
+ },
+ "locked": {
+ "lastModified": 1685518550,
+ "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "gitignore": {
+ "inputs": {
+ "nixpkgs": [
+ "devenv",
+ "pre-commit-hooks",
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1660459072,
+ "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "type": "github"
+ }
+ },
+ "lowdown-src": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1633514407,
+ "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
+ "owner": "kristapsdz",
+ "repo": "lowdown",
+ "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
+ "type": "github"
+ },
+ "original": {
+ "owner": "kristapsdz",
+ "repo": "lowdown",
+ "type": "github"
+ }
+ },
+ "nix": {
+ "inputs": {
+ "lowdown-src": "lowdown-src",
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-regression": "nixpkgs-regression"
+ },
+ "locked": {
+ "lastModified": 1676545802,
+ "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=",
+ "owner": "domenkozar",
+ "repo": "nix",
+ "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f",
+ "type": "github"
+ },
+ "original": {
+ "owner": "domenkozar",
+ "ref": "relaxed-flakes",
+ "repo": "nix",
+ "type": "github"
+ }
+ },
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1678875422,
+ "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs-lib": {
+ "locked": {
+ "dir": "lib",
+ "lastModified": 1693471703,
+ "narHash": "sha256-0l03ZBL8P1P6z8MaSDS/MvuU8E75rVxe5eE1N6gxeTo=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "3e52e76b70d5508f3cec70b882a29199f4d1ee85",
+ "type": "github"
+ },
+ "original": {
+ "dir": "lib",
+ "owner": "NixOS",
+ "ref": "nixos-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs-regression": {
+ "locked": {
+ "lastModified": 1643052045,
+ "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ }
+ },
+ "nixpkgs-stable": {
+ "locked": {
+ "lastModified": 1685801374,
+ "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixos-23.05",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs_2": {
+ "locked": {
+ "lastModified": 1694343207,
+ "narHash": "sha256-jWi7OwFxU5Owi4k2JmiL1sa/OuBCQtpaAesuj5LXC8w=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "78058d810644f5ed276804ce7ea9e82d92bee293",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "pre-commit-hooks": {
+ "inputs": {
+ "flake-compat": [
+ "devenv",
+ "flake-compat"
+ ],
+ "flake-utils": "flake-utils",
+ "gitignore": "gitignore",
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-stable": "nixpkgs-stable"
+ },
+ "locked": {
+ "lastModified": 1688056373,
+ "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=",
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "devenv": "devenv",
+ "flake-parts": "flake-parts",
+ "nixpkgs": "nixpkgs_2"
+ }
+ },
+ "systems": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/vendor/github.com/sagikazarmark/locafero/flake.nix b/vendor/github.com/sagikazarmark/locafero/flake.nix
new file mode 100644
index 0000000..209ecf2
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/locafero/flake.nix
@@ -0,0 +1,47 @@
+{
+ description = "Finder library for Afero";
+
+ inputs = {
+ nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
+ flake-parts.url = "github:hercules-ci/flake-parts";
+ devenv.url = "github:cachix/devenv";
+ };
+
+ outputs = inputs@{ flake-parts, ... }:
+ flake-parts.lib.mkFlake { inherit inputs; } {
+ imports = [
+ inputs.devenv.flakeModule
+ ];
+
+ systems = [ "x86_64-linux" "aarch64-darwin" ];
+
+ perSystem = { config, self', inputs', pkgs, system, ... }: rec {
+ devenv.shells = {
+ default = {
+ languages = {
+ go.enable = true;
+ };
+
+ packages = with pkgs; [
+ just
+
+ golangci-lint
+ ];
+
+ # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
+ containers = pkgs.lib.mkForce { };
+ };
+
+ ci = devenv.shells.default;
+
+ ci_1_20 = {
+ imports = [ devenv.shells.ci ];
+
+ languages = {
+ go.package = pkgs.go_1_20;
+ };
+ };
+ };
+ };
+ };
+}
diff --git a/vendor/github.com/sagikazarmark/locafero/helpers.go b/vendor/github.com/sagikazarmark/locafero/helpers.go
new file mode 100644
index 0000000..05b4344
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/locafero/helpers.go
@@ -0,0 +1,41 @@
+package locafero
+
+import "fmt"
+
+// NameWithExtensions creates a list of names from a base name and a list of extensions.
+//
+// TODO: find a better name for this function.
+func NameWithExtensions(baseName string, extensions ...string) []string {
+ var names []string
+
+ if baseName == "" {
+ return names
+ }
+
+ for _, ext := range extensions {
+ if ext == "" {
+ continue
+ }
+
+ names = append(names, fmt.Sprintf("%s.%s", baseName, ext))
+ }
+
+ return names
+}
+
+// NameWithOptionalExtensions creates a list of names from a base name and a list of extensions,
+// plus it adds the base name (without any extensions) to the end of the list.
+//
+// TODO: find a better name for this function.
+func NameWithOptionalExtensions(baseName string, extensions ...string) []string {
+ var names []string
+
+ if baseName == "" {
+ return names
+ }
+
+ names = NameWithExtensions(baseName, extensions...)
+ names = append(names, baseName)
+
+ return names
+}
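A short, illustrative sketch of how these helpers expand a base name into search candidates:

```go
package main

import (
	"fmt"

	"github.com/sagikazarmark/locafero"
)

func main() {
	// Expand a base name into concrete candidate file names.
	fmt.Println(locafero.NameWithExtensions("config", "yaml", "json"))
	// Output: [config.yaml config.json]

	// Same expansion, but also accept the bare base name as a fallback.
	fmt.Println(locafero.NameWithOptionalExtensions("config", "yaml", "json"))
	// Output: [config.yaml config.json config]
}
```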
diff --git a/vendor/github.com/sagikazarmark/locafero/justfile b/vendor/github.com/sagikazarmark/locafero/justfile
new file mode 100644
index 0000000..00a8885
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/locafero/justfile
@@ -0,0 +1,11 @@
+default:
+ just --list
+
+test:
+ go test -race -v ./...
+
+lint:
+ golangci-lint run
+
+fmt:
+ golangci-lint run --fix
diff --git a/vendor/github.com/sagikazarmark/slog-shim/.editorconfig b/vendor/github.com/sagikazarmark/slog-shim/.editorconfig
new file mode 100644
index 0000000..1fb0e1b
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/.editorconfig
@@ -0,0 +1,18 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.nix]
+indent_size = 2
+
+[{Makefile,*.mk}]
+indent_style = tab
+
+[Taskfile.yaml]
+indent_size = 2
diff --git a/vendor/github.com/sagikazarmark/slog-shim/.envrc b/vendor/github.com/sagikazarmark/slog-shim/.envrc
new file mode 100644
index 0000000..3ce7171
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/.envrc
@@ -0,0 +1,4 @@
+if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then
+ source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8="
+fi
+use flake . --impure
diff --git a/vendor/github.com/sagikazarmark/slog-shim/.gitignore b/vendor/github.com/sagikazarmark/slog-shim/.gitignore
new file mode 100644
index 0000000..dc6d8b5
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/.gitignore
@@ -0,0 +1,4 @@
+/.devenv/
+/.direnv/
+/.task/
+/build/
diff --git a/vendor/github.com/sagikazarmark/slog-shim/LICENSE b/vendor/github.com/sagikazarmark/slog-shim/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/sagikazarmark/slog-shim/README.md b/vendor/github.com/sagikazarmark/slog-shim/README.md
new file mode 100644
index 0000000..1f5be85
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/README.md
@@ -0,0 +1,81 @@
+# [slog](https://pkg.go.dev/log/slog) shim
+
+[CI](https://github.com/sagikazarmark/slog-shim/actions/workflows/ci.yaml)
+[Go Reference](https://pkg.go.dev/mod/github.com/sagikazarmark/slog-shim)
+
+[Built with Nix](https://builtwithnix.org)
+
+Go 1.21 introduced a [new structured logging package](https://golang.org/doc/go1.21#slog), `log/slog`, to the standard library.
+Although it's been eagerly anticipated by many, widespread adoption isn't expected to occur immediately,
+especially since updating to Go 1.21 is a decision that most libraries won't make overnight.
+
+Before this package was added to the standard library, there was an _experimental_ version available at [golang.org/x/exp/slog](https://pkg.go.dev/golang.org/x/exp/slog).
+While using experimental packages in production is generally discouraged,
+this one served as a sort of backport package for the last few years,
+incorporating new features before they were added to the standard library (like `slices`, `maps` or `errors`).
+
+This package serves as a bridge, helping libraries integrate slog in a backward-compatible way without having to immediately update their Go version requirement to 1.21. On Go 1.21 (and above), it acts as a drop-in replacement for `log/slog`, while below 1.21 it falls back to `golang.org/x/exp/slog`.
+
+**How does it achieve backwards compatibility?**
+
+Although there's no consensus on whether dropping support for older Go versions is considered backward compatible, a majority seems to believe it is.
+(I don't have scientific proof for this, but it's based on conversations with various individuals across different channels.)
+
+This package adheres to that interpretation of backward compatibility. On Go 1.21, the shim uses type aliases to offer the same API as `log/slog`.
+Once a library upgrades its version requirement to Go 1.21, it should be able to discard this shim and use `log/slog` directly.
+
+For older Go versions, the library might become unstable after removing the shim.
+However, since those older versions are no longer supported, the promise of backward compatibility remains intact.
+
+## Installation
+
+```shell
+go get github.com/sagikazarmark/slog-shim
+```
+
+## Usage
+
+Import this package into your library and use it in your public API:
+
+```go
+package mylib
+
+import slog "github.com/sagikazarmark/slog-shim"
+
+func New(logger *slog.Logger) MyLib {
+ // ...
+}
+```
+
+When using the library, clients can either use `log/slog` (when on Go 1.21) or `golang.org/x/exp/slog` (below Go 1.21):
+
+```go
+package main
+
+import "log/slog"
+
+// OR
+
+import "golang.org/x/exp/slog"
+
+mylib.New(slog.Default())
+```
+
+**Make sure consumers are aware that your API behaves differently on different Go versions.**
+
+Once you bump your Go version requirement to Go 1.21, you can drop the shim entirely from your code:
+
+```diff
+package mylib
+
+- import slog "github.com/sagikazarmark/slog-shim"
++ import "log/slog"
+
+func New(logger *slog.Logger) MyLib {
+ // ...
+}
+```
+
+## License
+
+The project is licensed under a [BSD-style license](LICENSE).
diff --git a/vendor/github.com/sagikazarmark/slog-shim/attr.go b/vendor/github.com/sagikazarmark/slog-shim/attr.go
new file mode 100644
index 0000000..89608bf
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/attr.go
@@ -0,0 +1,74 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "log/slog"
+ "time"
+)
+
+// An Attr is a key-value pair.
+type Attr = slog.Attr
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+ return slog.String(key, value)
+}
+
+// Int64 returns an Attr for an int64.
+func Int64(key string, value int64) Attr {
+ return slog.Int64(key, value)
+}
+
+// Int converts an int to an int64 and returns
+// an Attr with that value.
+func Int(key string, value int) Attr {
+ return slog.Int(key, value)
+}
+
+// Uint64 returns an Attr for a uint64.
+func Uint64(key string, v uint64) Attr {
+ return slog.Uint64(key, v)
+}
+
+// Float64 returns an Attr for a floating-point number.
+func Float64(key string, v float64) Attr {
+ return slog.Float64(key, v)
+}
+
+// Bool returns an Attr for a bool.
+func Bool(key string, v bool) Attr {
+ return slog.Bool(key, v)
+}
+
+// Time returns an Attr for a time.Time.
+// It discards the monotonic portion.
+func Time(key string, v time.Time) Attr {
+ return slog.Time(key, v)
+}
+
+// Duration returns an Attr for a time.Duration.
+func Duration(key string, v time.Duration) Attr {
+ return slog.Duration(key, v)
+}
+
+// Group returns an Attr for a Group Value.
+// The first argument is the key; the remaining arguments
+// are converted to Attrs as in [Logger.Log].
+//
+// Use Group to collect several key-value pairs under a single
+// key on a log line, or as the result of LogValue
+// in order to log a single value as multiple Attrs.
+func Group(key string, args ...any) Attr {
+ return slog.Group(key, args...)
+}
+
+// Any returns an Attr for the supplied value.
+// See [Value.AnyValue] for how values are treated.
+func Any(key string, value any) Attr {
+ return slog.Any(key, value)
+}
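Since every constructor above simply forwards to its `log/slog` counterpart, typed attributes behave exactly as in the standard library. A small sketch, using the import alias shown in the shim's README:

```go
package main

import (
	"os"
	"time"

	slog "github.com/sagikazarmark/slog-shim"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

	// Typed Attr constructors avoid the untyped key/value alternation,
	// and Group nests related fields under one key.
	logger.Info("request handled",
		slog.String("method", "GET"),
		slog.Int("status", 200),
		slog.Duration("elapsed", 42*time.Millisecond),
		slog.Group("user",
			slog.String("id", "u-123"),
			slog.Bool("admin", false),
		),
	)
}
```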
diff --git a/vendor/github.com/sagikazarmark/slog-shim/attr_120.go b/vendor/github.com/sagikazarmark/slog-shim/attr_120.go
new file mode 100644
index 0000000..b664813
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/attr_120.go
@@ -0,0 +1,75 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "time"
+
+ "golang.org/x/exp/slog"
+)
+
+// An Attr is a key-value pair.
+type Attr = slog.Attr
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+ return slog.String(key, value)
+}
+
+// Int64 returns an Attr for an int64.
+func Int64(key string, value int64) Attr {
+ return slog.Int64(key, value)
+}
+
+// Int converts an int to an int64 and returns
+// an Attr with that value.
+func Int(key string, value int) Attr {
+ return slog.Int(key, value)
+}
+
+// Uint64 returns an Attr for a uint64.
+func Uint64(key string, v uint64) Attr {
+ return slog.Uint64(key, v)
+}
+
+// Float64 returns an Attr for a floating-point number.
+func Float64(key string, v float64) Attr {
+ return slog.Float64(key, v)
+}
+
+// Bool returns an Attr for a bool.
+func Bool(key string, v bool) Attr {
+ return slog.Bool(key, v)
+}
+
+// Time returns an Attr for a time.Time.
+// It discards the monotonic portion.
+func Time(key string, v time.Time) Attr {
+ return slog.Time(key, v)
+}
+
+// Duration returns an Attr for a time.Duration.
+func Duration(key string, v time.Duration) Attr {
+ return slog.Duration(key, v)
+}
+
+// Group returns an Attr for a Group Value.
+// The first argument is the key; the remaining arguments
+// are converted to Attrs as in [Logger.Log].
+//
+// Use Group to collect several key-value pairs under a single
+// key on a log line, or as the result of LogValue
+// in order to log a single value as multiple Attrs.
+func Group(key string, args ...any) Attr {
+ return slog.Group(key, args...)
+}
+
+// Any returns an Attr for the supplied value.
+// See [Value.AnyValue] for how values are treated.
+func Any(key string, value any) Attr {
+ return slog.Any(key, value)
+}
diff --git a/vendor/github.com/sagikazarmark/slog-shim/flake.lock b/vendor/github.com/sagikazarmark/slog-shim/flake.lock
new file mode 100644
index 0000000..7e8898e
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/flake.lock
@@ -0,0 +1,273 @@
+{
+ "nodes": {
+ "devenv": {
+ "inputs": {
+ "flake-compat": "flake-compat",
+ "nix": "nix",
+ "nixpkgs": "nixpkgs",
+ "pre-commit-hooks": "pre-commit-hooks"
+ },
+ "locked": {
+ "lastModified": 1694097209,
+ "narHash": "sha256-gQmBjjxeSyySjbh0yQVBKApo2KWIFqqbRUvG+Fa+QpM=",
+ "owner": "cachix",
+ "repo": "devenv",
+ "rev": "7a8e6a91510efe89d8dcb8e43233f93e86f6b189",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "devenv",
+ "type": "github"
+ }
+ },
+ "flake-compat": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1673956053,
+ "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
+ "type": "github"
+ },
+ "original": {
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "type": "github"
+ }
+ },
+ "flake-parts": {
+ "inputs": {
+ "nixpkgs-lib": "nixpkgs-lib"
+ },
+ "locked": {
+ "lastModified": 1693611461,
+ "narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=",
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "type": "github"
+ }
+ },
+ "flake-utils": {
+ "inputs": {
+ "systems": "systems"
+ },
+ "locked": {
+ "lastModified": 1685518550,
+ "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "gitignore": {
+ "inputs": {
+ "nixpkgs": [
+ "devenv",
+ "pre-commit-hooks",
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1660459072,
+ "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "type": "github"
+ }
+ },
+ "lowdown-src": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1633514407,
+ "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
+ "owner": "kristapsdz",
+ "repo": "lowdown",
+ "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
+ "type": "github"
+ },
+ "original": {
+ "owner": "kristapsdz",
+ "repo": "lowdown",
+ "type": "github"
+ }
+ },
+ "nix": {
+ "inputs": {
+ "lowdown-src": "lowdown-src",
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-regression": "nixpkgs-regression"
+ },
+ "locked": {
+ "lastModified": 1676545802,
+ "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=",
+ "owner": "domenkozar",
+ "repo": "nix",
+ "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f",
+ "type": "github"
+ },
+ "original": {
+ "owner": "domenkozar",
+ "ref": "relaxed-flakes",
+ "repo": "nix",
+ "type": "github"
+ }
+ },
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1678875422,
+ "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs-lib": {
+ "locked": {
+ "dir": "lib",
+ "lastModified": 1693471703,
+ "narHash": "sha256-0l03ZBL8P1P6z8MaSDS/MvuU8E75rVxe5eE1N6gxeTo=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "3e52e76b70d5508f3cec70b882a29199f4d1ee85",
+ "type": "github"
+ },
+ "original": {
+ "dir": "lib",
+ "owner": "NixOS",
+ "ref": "nixos-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs-regression": {
+ "locked": {
+ "lastModified": 1643052045,
+ "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ }
+ },
+ "nixpkgs-stable": {
+ "locked": {
+ "lastModified": 1685801374,
+ "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixos-23.05",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs_2": {
+ "locked": {
+ "lastModified": 1694345580,
+ "narHash": "sha256-BbG0NUxQTz1dN/Y87yPWZc/0Kp/coJ0vM3+7sNa5kUM=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "f002de6834fdde9c864f33c1ec51da7df19cd832",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "master",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "pre-commit-hooks": {
+ "inputs": {
+ "flake-compat": [
+ "devenv",
+ "flake-compat"
+ ],
+ "flake-utils": "flake-utils",
+ "gitignore": "gitignore",
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-stable": "nixpkgs-stable"
+ },
+ "locked": {
+ "lastModified": 1688056373,
+ "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=",
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "devenv": "devenv",
+ "flake-parts": "flake-parts",
+ "nixpkgs": "nixpkgs_2"
+ }
+ },
+ "systems": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/vendor/github.com/sagikazarmark/slog-shim/flake.nix b/vendor/github.com/sagikazarmark/slog-shim/flake.nix
new file mode 100644
index 0000000..7239bbc
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/flake.nix
@@ -0,0 +1,57 @@
+{
+ inputs = {
+ # nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
+ nixpkgs.url = "github:NixOS/nixpkgs/master";
+ flake-parts.url = "github:hercules-ci/flake-parts";
+ devenv.url = "github:cachix/devenv";
+ };
+
+ outputs = inputs@{ flake-parts, ... }:
+ flake-parts.lib.mkFlake { inherit inputs; } {
+ imports = [
+ inputs.devenv.flakeModule
+ ];
+
+ systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ];
+
+ perSystem = { config, self', inputs', pkgs, system, ... }: rec {
+ devenv.shells = {
+ default = {
+ languages = {
+ go.enable = true;
+ go.package = pkgs.lib.mkDefault pkgs.go_1_21;
+ };
+
+ # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
+ containers = pkgs.lib.mkForce { };
+ };
+
+ ci = devenv.shells.default;
+
+ ci_1_19 = {
+ imports = [ devenv.shells.ci ];
+
+ languages = {
+ go.package = pkgs.go_1_19;
+ };
+ };
+
+ ci_1_20 = {
+ imports = [ devenv.shells.ci ];
+
+ languages = {
+ go.package = pkgs.go_1_20;
+ };
+ };
+
+ ci_1_21 = {
+ imports = [ devenv.shells.ci ];
+
+ languages = {
+ go.package = pkgs.go_1_21;
+ };
+ };
+ };
+ };
+ };
+}
diff --git a/vendor/github.com/sagikazarmark/slog-shim/handler.go b/vendor/github.com/sagikazarmark/slog-shim/handler.go
new file mode 100644
index 0000000..f55556a
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/handler.go
@@ -0,0 +1,45 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "log/slog"
+)
+
+// A Handler handles log records produced by a Logger.
+//
+// A typical handler may print log records to standard error,
+// or write them to a file or database, or perhaps augment them
+// with additional attributes and pass them on to another handler.
+//
+// Any of the Handler's methods may be called concurrently with itself
+// or with other methods. It is the responsibility of the Handler to
+// manage this concurrency.
+//
+// Users of the slog package should not invoke Handler methods directly.
+// They should use the methods of [Logger] instead.
+type Handler = slog.Handler
+
+// HandlerOptions are options for a TextHandler or JSONHandler.
+// A zero HandlerOptions consists entirely of default values.
+type HandlerOptions = slog.HandlerOptions
+
+// Keys for "built-in" attributes.
+const (
+ // TimeKey is the key used by the built-in handlers for the time
+ // when the log method is called. The associated Value is a [time.Time].
+ TimeKey = slog.TimeKey
+ // LevelKey is the key used by the built-in handlers for the level
+ // of the log call. The associated value is a [Level].
+ LevelKey = slog.LevelKey
+ // MessageKey is the key used by the built-in handlers for the
+ // message of the log call. The associated value is a string.
+ MessageKey = slog.MessageKey
+ // SourceKey is the key used by the built-in handlers for the source file
+ // and line of the log call. The associated value is a string.
+ SourceKey = slog.SourceKey
+)
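The exported key constants are what make `HandlerOptions.ReplaceAttr` robust against hard-coded strings. A hedged sketch of the pattern:

```go
package main

import (
	"os"

	slog "github.com/sagikazarmark/slog-shim"
)

func main() {
	opts := &slog.HandlerOptions{
		ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
			switch a.Key {
			case slog.TimeKey:
				// Returning a zero Attr drops the attribute entirely,
				// giving deterministic output (handy in tests).
				return slog.Attr{}
			case slog.MessageKey:
				a.Key = "message" // rename the built-in msg key
			}
			return a
		},
	}

	logger := slog.New(slog.NewTextHandler(os.Stdout, opts))
	logger.Info("starting up")
	// level=INFO message="starting up"
}
```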
diff --git a/vendor/github.com/sagikazarmark/slog-shim/handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/handler_120.go
new file mode 100644
index 0000000..6700575
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/handler_120.go
@@ -0,0 +1,45 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "golang.org/x/exp/slog"
+)
+
+// A Handler handles log records produced by a Logger.
+//
+// A typical handler may print log records to standard error,
+// or write them to a file or database, or perhaps augment them
+// with additional attributes and pass them on to another handler.
+//
+// Any of the Handler's methods may be called concurrently with itself
+// or with other methods. It is the responsibility of the Handler to
+// manage this concurrency.
+//
+// Users of the slog package should not invoke Handler methods directly.
+// They should use the methods of [Logger] instead.
+type Handler = slog.Handler
+
+// HandlerOptions are options for a TextHandler or JSONHandler.
+// A zero HandlerOptions consists entirely of default values.
+type HandlerOptions = slog.HandlerOptions
+
+// Keys for "built-in" attributes.
+const (
+ // TimeKey is the key used by the built-in handlers for the time
+ // when the log method is called. The associated Value is a [time.Time].
+ TimeKey = slog.TimeKey
+ // LevelKey is the key used by the built-in handlers for the level
+ // of the log call. The associated value is a [Level].
+ LevelKey = slog.LevelKey
+ // MessageKey is the key used by the built-in handlers for the
+ // message of the log call. The associated value is a string.
+ MessageKey = slog.MessageKey
+ // SourceKey is the key used by the built-in handlers for the source file
+ // and line of the log call. The associated value is a string.
+ SourceKey = slog.SourceKey
+)
diff --git a/vendor/github.com/sagikazarmark/slog-shim/json_handler.go b/vendor/github.com/sagikazarmark/slog-shim/json_handler.go
new file mode 100644
index 0000000..7c22bd8
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/json_handler.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "io"
+ "log/slog"
+)
+
+// JSONHandler is a Handler that writes Records to an io.Writer as
+// line-delimited JSON objects.
+type JSONHandler = slog.JSONHandler
+
+// NewJSONHandler creates a JSONHandler that writes to w,
+// using the given options.
+// If opts is nil, the default options are used.
+func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler {
+ return slog.NewJSONHandler(w, opts)
+}
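Wiring the alias into a JSON logger is identical to stdlib usage; for example:

```go
package main

import (
	"os"

	slog "github.com/sagikazarmark/slog-shim"
)

func main() {
	// One JSON object per line; Level filters out records below INFO.
	logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))

	logger.Info("service started", "port", 8080)
	// {"time":"...","level":"INFO","msg":"service started","port":8080}
}
```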
diff --git a/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go
new file mode 100644
index 0000000..7b14f10
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "io"
+
+ "golang.org/x/exp/slog"
+)
+
+// JSONHandler is a Handler that writes Records to an io.Writer as
+// line-delimited JSON objects.
+type JSONHandler = slog.JSONHandler
+
+// NewJSONHandler creates a JSONHandler that writes to w,
+// using the given options.
+// If opts is nil, the default options are used.
+func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler {
+ return slog.NewJSONHandler(w, opts)
+}
diff --git a/vendor/github.com/sagikazarmark/slog-shim/level.go b/vendor/github.com/sagikazarmark/slog-shim/level.go
new file mode 100644
index 0000000..07288cf
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/level.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "log/slog"
+)
+
+// A Level is the importance or severity of a log event.
+// The higher the level, the more important or severe the event.
+type Level = slog.Level
+
+// Level numbers are inherently arbitrary,
+// but we picked them to satisfy three constraints.
+// Any system can map them to another numbering scheme if it wishes.
+//
+// First, we wanted the default level to be Info. Since Levels are ints, Info is
+// the default value for int, zero.
+//
+// Second, we wanted to make it easy to use levels to specify logger verbosity.
+// Since a larger level means a more severe event, a logger that accepts events
+// with smaller (or more negative) level means a more verbose logger. Logger
+// verbosity is thus the negation of event severity, and the default verbosity
+// of 0 accepts all events at least as severe as INFO.
+//
+// Third, we wanted some room between levels to accommodate schemes with named
+// levels between ours. For example, Google Cloud Logging defines a Notice level
+// between Info and Warn. Since there are only a few of these intermediate
+// levels, the gap between the numbers need not be large. Our gap of 4 matches
+// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the
+// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog
+// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog
+// does not. But those OpenTelemetry levels can still be represented as slog
+// Levels by using the appropriate integers.
+//
+// Names for common levels.
+const (
+ LevelDebug Level = slog.LevelDebug
+ LevelInfo Level = slog.LevelInfo
+ LevelWarn Level = slog.LevelWarn
+ LevelError Level = slog.LevelError
+)
+
+// A LevelVar is a Level variable, to allow a Handler level to change
+// dynamically.
+// It implements Leveler as well as a Set method,
+// and it is safe for use by multiple goroutines.
+// The zero LevelVar corresponds to LevelInfo.
+type LevelVar = slog.LevelVar
+
+// A Leveler provides a Level value.
+//
+// As Level itself implements Leveler, clients typically supply
+// a Level value wherever a Leveler is needed, such as in HandlerOptions.
+// Clients who need to vary the level dynamically can provide a more complex
+// Leveler implementation such as *LevelVar.
+type Leveler = slog.Leveler
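A `LevelVar` is the standard way to change verbosity at runtime without rebuilding the handler; a minimal sketch:

```go
package main

import (
	"os"

	slog "github.com/sagikazarmark/slog-shim"
)

func main() {
	var level slog.LevelVar // zero value means LevelInfo

	logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
		Level: &level, // *LevelVar satisfies Leveler
	}))

	logger.Debug("hidden")     // below INFO, dropped
	level.Set(slog.LevelDebug) // e.g. toggled by a SIGHUP or an admin endpoint
	logger.Debug("now visible")
}
```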
diff --git a/vendor/github.com/sagikazarmark/slog-shim/level_120.go b/vendor/github.com/sagikazarmark/slog-shim/level_120.go
new file mode 100644
index 0000000..d3feb94
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/level_120.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "golang.org/x/exp/slog"
+)
+
+// A Level is the importance or severity of a log event.
+// The higher the level, the more important or severe the event.
+type Level = slog.Level
+
+// Level numbers are inherently arbitrary,
+// but we picked them to satisfy three constraints.
+// Any system can map them to another numbering scheme if it wishes.
+//
+// First, we wanted the default level to be Info. Since Levels are ints, Info is
+// the default value for int, zero.
+//
+// Second, we wanted to make it easy to use levels to specify logger verbosity.
+// Since a larger level means a more severe event, a logger that accepts events
+// with smaller (or more negative) level means a more verbose logger. Logger
+// verbosity is thus the negation of event severity, and the default verbosity
+// of 0 accepts all events at least as severe as INFO.
+//
+// Third, we wanted some room between levels to accommodate schemes with named
+// levels between ours. For example, Google Cloud Logging defines a Notice level
+// between Info and Warn. Since there are only a few of these intermediate
+// levels, the gap between the numbers need not be large. Our gap of 4 matches
+// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the
+// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog
+// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog
+// does not. But those OpenTelemetry levels can still be represented as slog
+// Levels by using the appropriate integers.
+//
+// Names for common levels.
+const (
+ LevelDebug Level = slog.LevelDebug
+ LevelInfo Level = slog.LevelInfo
+ LevelWarn Level = slog.LevelWarn
+ LevelError Level = slog.LevelError
+)
+
+// A LevelVar is a Level variable, to allow a Handler level to change
+// dynamically.
+// It implements Leveler as well as a Set method,
+// and it is safe for use by multiple goroutines.
+// The zero LevelVar corresponds to LevelInfo.
+type LevelVar = slog.LevelVar
+
+// A Leveler provides a Level value.
+//
+// As Level itself implements Leveler, clients typically supply
+// a Level value wherever a Leveler is needed, such as in HandlerOptions.
+// Clients who need to vary the level dynamically can provide a more complex
+// Leveler implementation such as *LevelVar.
+type Leveler = slog.Leveler
diff --git a/vendor/github.com/sagikazarmark/slog-shim/logger.go b/vendor/github.com/sagikazarmark/slog-shim/logger.go
new file mode 100644
index 0000000..e80036b
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/logger.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "context"
+ "log"
+ "log/slog"
+)
+
+// Default returns the default Logger.
+func Default() *Logger { return slog.Default() }
+
+// SetDefault makes l the default Logger.
+// After this call, output from the log package's default Logger
+// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler.
+func SetDefault(l *Logger) {
+ slog.SetDefault(l)
+}
+
+// A Logger records structured information about each call to its
+// Log, Debug, Info, Warn, and Error methods.
+// For each call, it creates a Record and passes it to a Handler.
+//
+// To create a new Logger, call [New] or a Logger method
+// that begins "With".
+type Logger = slog.Logger
+
+// New creates a new Logger with the given non-nil Handler.
+func New(h Handler) *Logger {
+ return slog.New(h)
+}
+
+// With calls Logger.With on the default logger.
+func With(args ...any) *Logger {
+ return slog.With(args...)
+}
+
+// NewLogLogger returns a new log.Logger such that each call to its Output method
+// dispatches a Record to the specified handler. The logger acts as a bridge from
+// the older log API to newer structured logging handlers.
+func NewLogLogger(h Handler, level Level) *log.Logger {
+ return slog.NewLogLogger(h, level)
+}
+
+// Debug calls Logger.Debug on the default logger.
+func Debug(msg string, args ...any) {
+ slog.Debug(msg, args...)
+}
+
+// DebugContext calls Logger.DebugContext on the default logger.
+func DebugContext(ctx context.Context, msg string, args ...any) {
+ slog.DebugContext(ctx, msg, args...)
+}
+
+// Info calls Logger.Info on the default logger.
+func Info(msg string, args ...any) {
+ slog.Info(msg, args...)
+}
+
+// InfoContext calls Logger.InfoContext on the default logger.
+func InfoContext(ctx context.Context, msg string, args ...any) {
+ slog.InfoContext(ctx, msg, args...)
+}
+
+// Warn calls Logger.Warn on the default logger.
+func Warn(msg string, args ...any) {
+ slog.Warn(msg, args...)
+}
+
+// WarnContext calls Logger.WarnContext on the default logger.
+func WarnContext(ctx context.Context, msg string, args ...any) {
+ slog.WarnContext(ctx, msg, args...)
+}
+
+// Error calls Logger.Error on the default logger.
+func Error(msg string, args ...any) {
+ slog.Error(msg, args...)
+}
+
+// ErrorContext calls Logger.ErrorContext on the default logger.
+func ErrorContext(ctx context.Context, msg string, args ...any) {
+ slog.ErrorContext(ctx, msg, args...)
+}
+
+// Log calls Logger.Log on the default logger.
+func Log(ctx context.Context, level Level, msg string, args ...any) {
+ slog.Log(ctx, level, msg, args...)
+}
+
+// LogAttrs calls Logger.LogAttrs on the default logger.
+func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
+ slog.LogAttrs(ctx, level, msg, attrs...)
+}
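`NewLogLogger` is the usual bridge for APIs that still demand a `*log.Logger`; a sketch:

```go
package main

import (
	"os"

	slog "github.com/sagikazarmark/slog-shim"
)

func main() {
	handler := slog.NewJSONHandler(os.Stdout, nil)

	// Each Print on the returned *log.Logger becomes a slog Record at
	// LevelError, which is useful for fields like http.Server.ErrorLog.
	errorLog := slog.NewLogLogger(handler, slog.LevelError)
	errorLog.Print("tls handshake failed")
}
```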
diff --git a/vendor/github.com/sagikazarmark/slog-shim/logger_120.go b/vendor/github.com/sagikazarmark/slog-shim/logger_120.go
new file mode 100644
index 0000000..97ebdd5
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/logger_120.go
@@ -0,0 +1,99 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "context"
+ "log"
+
+ "golang.org/x/exp/slog"
+)
+
+// Default returns the default Logger.
+func Default() *Logger { return slog.Default() }
+
+// SetDefault makes l the default Logger.
+// After this call, output from the log package's default Logger
+// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler.
+func SetDefault(l *Logger) {
+ slog.SetDefault(l)
+}
+
+// A Logger records structured information about each call to its
+// Log, Debug, Info, Warn, and Error methods.
+// For each call, it creates a Record and passes it to a Handler.
+//
+// To create a new Logger, call [New] or a Logger method
+// that begins "With".
+type Logger = slog.Logger
+
+// New creates a new Logger with the given non-nil Handler.
+func New(h Handler) *Logger {
+ return slog.New(h)
+}
+
+// With calls Logger.With on the default logger.
+func With(args ...any) *Logger {
+ return slog.With(args...)
+}
+
+// NewLogLogger returns a new log.Logger such that each call to its Output method
+// dispatches a Record to the specified handler. The logger acts as a bridge from
+// the older log API to newer structured logging handlers.
+func NewLogLogger(h Handler, level Level) *log.Logger {
+ return slog.NewLogLogger(h, level)
+}
+
+// Debug calls Logger.Debug on the default logger.
+func Debug(msg string, args ...any) {
+ slog.Debug(msg, args...)
+}
+
+// DebugContext calls Logger.DebugContext on the default logger.
+func DebugContext(ctx context.Context, msg string, args ...any) {
+ slog.DebugContext(ctx, msg, args...)
+}
+
+// Info calls Logger.Info on the default logger.
+func Info(msg string, args ...any) {
+ slog.Info(msg, args...)
+}
+
+// InfoContext calls Logger.InfoContext on the default logger.
+func InfoContext(ctx context.Context, msg string, args ...any) {
+ slog.InfoContext(ctx, msg, args...)
+}
+
+// Warn calls Logger.Warn on the default logger.
+func Warn(msg string, args ...any) {
+ slog.Warn(msg, args...)
+}
+
+// WarnContext calls Logger.WarnContext on the default logger.
+func WarnContext(ctx context.Context, msg string, args ...any) {
+ slog.WarnContext(ctx, msg, args...)
+}
+
+// Error calls Logger.Error on the default logger.
+func Error(msg string, args ...any) {
+ slog.Error(msg, args...)
+}
+
+// ErrorContext calls Logger.ErrorContext on the default logger.
+func ErrorContext(ctx context.Context, msg string, args ...any) {
+ slog.ErrorContext(ctx, msg, args...)
+}
+
+// Log calls Logger.Log on the default logger.
+func Log(ctx context.Context, level Level, msg string, args ...any) {
+ slog.Log(ctx, level, msg, args...)
+}
+
+// LogAttrs calls Logger.LogAttrs on the default logger.
+func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
+ slog.LogAttrs(ctx, level, msg, attrs...)
+}
diff --git a/vendor/github.com/sagikazarmark/slog-shim/record.go b/vendor/github.com/sagikazarmark/slog-shim/record.go
new file mode 100644
index 0000000..85ad1f7
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/record.go
@@ -0,0 +1,31 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "log/slog"
+ "time"
+)
+
+// A Record holds information about a log event.
+// Copies of a Record share state.
+// Do not modify a Record after handing out a copy to it.
+// Call [NewRecord] to create a new Record.
+// Use [Record.Clone] to create a copy with no shared state.
+type Record = slog.Record
+
+// NewRecord creates a Record from the given arguments.
+// Use [Record.AddAttrs] to add attributes to the Record.
+//
+// NewRecord is intended for logging APIs that want to support a [Handler] as
+// a backend.
+func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record {
+ return slog.NewRecord(t, level, msg, pc)
+}
+
+// Source describes the location of a line of source code.
+type Source = slog.Source
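`NewRecord` targets authors of logging frontends rather than everyday callers. A sketch of handing a hand-built Record to an arbitrary Handler:

```go
package main

import (
	"context"
	"os"
	"runtime"
	"time"

	slog "github.com/sagikazarmark/slog-shim"
)

// emit hands a manually built Record to an arbitrary Handler,
// the pattern a custom logging API would use.
func emit(h slog.Handler, msg string) {
	var pcs [1]uintptr
	runtime.Callers(2, pcs[:]) // skip runtime.Callers and emit itself
	r := slog.NewRecord(time.Now(), slog.LevelInfo, msg, pcs[0])
	r.AddAttrs(slog.String("frontend", "custom"))
	_ = h.Handle(context.Background(), r)
}

func main() {
	emit(slog.NewTextHandler(os.Stdout, nil), "hello from a custom API")
}
```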
diff --git a/vendor/github.com/sagikazarmark/slog-shim/record_120.go b/vendor/github.com/sagikazarmark/slog-shim/record_120.go
new file mode 100644
index 0000000..c2eaf4e
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/record_120.go
@@ -0,0 +1,32 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "time"
+
+ "golang.org/x/exp/slog"
+)
+
+// A Record holds information about a log event.
+// Copies of a Record share state.
+// Do not modify a Record after handing out a copy to it.
+// Call [NewRecord] to create a new Record.
+// Use [Record.Clone] to create a copy with no shared state.
+type Record = slog.Record
+
+// NewRecord creates a Record from the given arguments.
+// Use [Record.AddAttrs] to add attributes to the Record.
+//
+// NewRecord is intended for logging APIs that want to support a [Handler] as
+// a backend.
+func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record {
+ return slog.NewRecord(t, level, msg, pc)
+}
+
+// Source describes the location of a line of source code.
+type Source = slog.Source
diff --git a/vendor/github.com/sagikazarmark/slog-shim/text_handler.go b/vendor/github.com/sagikazarmark/slog-shim/text_handler.go
new file mode 100644
index 0000000..45f6cfc
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/text_handler.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "io"
+ "log/slog"
+)
+
+// TextHandler is a Handler that writes Records to an io.Writer as a
+// sequence of key=value pairs separated by spaces and followed by a newline.
+type TextHandler = slog.TextHandler
+
+// NewTextHandler creates a TextHandler that writes to w,
+// using the given options.
+// If opts is nil, the default options are used.
+func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler {
+ return slog.NewTextHandler(w, opts)
+}
diff --git a/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go
new file mode 100644
index 0000000..a69d63c
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "io"
+
+ "golang.org/x/exp/slog"
+)
+
+// TextHandler is a Handler that writes Records to an io.Writer as a
+// sequence of key=value pairs separated by spaces and followed by a newline.
+type TextHandler = slog.TextHandler
+
+// NewTextHandler creates a TextHandler that writes to w,
+// using the given options.
+// If opts is nil, the default options are used.
+func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler {
+ return slog.NewTextHandler(w, opts)
+}
diff --git a/vendor/github.com/sagikazarmark/slog-shim/value.go b/vendor/github.com/sagikazarmark/slog-shim/value.go
new file mode 100644
index 0000000..61173eb
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/value.go
@@ -0,0 +1,109 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "log/slog"
+ "time"
+)
+
+// A Value can represent any Go value, but unlike type any,
+// it can represent most small values without an allocation.
+// The zero Value corresponds to nil.
+type Value = slog.Value
+
+// Kind is the kind of a Value.
+type Kind = slog.Kind
+
+// The following list is sorted alphabetically, but it's also important that
+// KindAny is 0 so that a zero Value represents nil.
+const (
+ KindAny = slog.KindAny
+ KindBool = slog.KindBool
+ KindDuration = slog.KindDuration
+ KindFloat64 = slog.KindFloat64
+ KindInt64 = slog.KindInt64
+ KindString = slog.KindString
+ KindTime = slog.KindTime
+ KindUint64 = slog.KindUint64
+ KindGroup = slog.KindGroup
+ KindLogValuer = slog.KindLogValuer
+)
+
+//////////////// Constructors
+
+// StringValue returns a new Value for a string.
+func StringValue(value string) Value {
+ return slog.StringValue(value)
+}
+
+// IntValue returns a Value for an int.
+func IntValue(v int) Value {
+ return slog.IntValue(v)
+}
+
+// Int64Value returns a Value for an int64.
+func Int64Value(v int64) Value {
+ return slog.Int64Value(v)
+}
+
+// Uint64Value returns a Value for a uint64.
+func Uint64Value(v uint64) Value {
+ return slog.Uint64Value(v)
+}
+
+// Float64Value returns a Value for a floating-point number.
+func Float64Value(v float64) Value {
+ return slog.Float64Value(v)
+}
+
+// BoolValue returns a Value for a bool.
+func BoolValue(v bool) Value {
+ return slog.BoolValue(v)
+}
+
+// TimeValue returns a Value for a time.Time.
+// It discards the monotonic portion.
+func TimeValue(v time.Time) Value {
+ return slog.TimeValue(v)
+}
+
+// DurationValue returns a Value for a time.Duration.
+func DurationValue(v time.Duration) Value {
+ return slog.DurationValue(v)
+}
+
+// GroupValue returns a new Value for a list of Attrs.
+// The caller must not subsequently mutate the argument slice.
+func GroupValue(as ...Attr) Value {
+ return slog.GroupValue(as...)
+}
+
+// AnyValue returns a Value for the supplied value.
+//
+// If the supplied value is of type Value, it is returned
+// unmodified.
+//
+// Given a value of one of Go's predeclared string, bool, or
+// (non-complex) numeric types, AnyValue returns a Value of kind
+// String, Bool, Uint64, Int64, or Float64. The width of the
+// original numeric type is not preserved.
+//
+// Given a time.Time or time.Duration value, AnyValue returns a Value of kind
+// KindTime or KindDuration. The monotonic time is not preserved.
+//
+// For nil, or values of all other types, including named types whose
+// underlying type is numeric, AnyValue returns a value of kind KindAny.
+func AnyValue(v any) Value {
+ return slog.AnyValue(v)
+}
+
+// A LogValuer is any Go value that can convert itself into a Value for logging.
+//
+// This mechanism may be used to defer expensive operations until they are
+// needed, or to expand a single value into a sequence of components.
+type LogValuer = slog.LogValuer
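A `LogValuer` is also the idiomatic place for redaction, since the conversion runs only when a record is actually handled; a sketch:

```go
package main

import (
	"os"

	slog "github.com/sagikazarmark/slog-shim"
)

// token redacts itself whenever it is logged; the LogValue call is
// deferred until a handler actually processes the record.
type token string

func (t token) LogValue() slog.Value {
	return slog.StringValue("REDACTED")
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	logger.Info("auth", slog.Any("token", token("s3cr3t")))
	// ... token=REDACTED
}
```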
diff --git a/vendor/github.com/sagikazarmark/slog-shim/value_120.go b/vendor/github.com/sagikazarmark/slog-shim/value_120.go
new file mode 100644
index 0000000..0f9f871
--- /dev/null
+++ b/vendor/github.com/sagikazarmark/slog-shim/value_120.go
@@ -0,0 +1,110 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "time"
+
+ "golang.org/x/exp/slog"
+)
+
+// A Value can represent any Go value, but unlike type any,
+// it can represent most small values without an allocation.
+// The zero Value corresponds to nil.
+type Value = slog.Value
+
+// Kind is the kind of a Value.
+type Kind = slog.Kind
+
+// The following list is sorted alphabetically, but it's also important that
+// KindAny is 0 so that a zero Value represents nil.
+const (
+ KindAny = slog.KindAny
+ KindBool = slog.KindBool
+ KindDuration = slog.KindDuration
+ KindFloat64 = slog.KindFloat64
+ KindInt64 = slog.KindInt64
+ KindString = slog.KindString
+ KindTime = slog.KindTime
+ KindUint64 = slog.KindUint64
+ KindGroup = slog.KindGroup
+ KindLogValuer = slog.KindLogValuer
+)
+
+//////////////// Constructors
+
+// StringValue returns a new Value for a string.
+func StringValue(value string) Value {
+ return slog.StringValue(value)
+}
+
+// IntValue returns a Value for an int.
+func IntValue(v int) Value {
+ return slog.IntValue(v)
+}
+
+// Int64Value returns a Value for an int64.
+func Int64Value(v int64) Value {
+ return slog.Int64Value(v)
+}
+
+// Uint64Value returns a Value for a uint64.
+func Uint64Value(v uint64) Value {
+ return slog.Uint64Value(v)
+}
+
+// Float64Value returns a Value for a floating-point number.
+func Float64Value(v float64) Value {
+ return slog.Float64Value(v)
+}
+
+// BoolValue returns a Value for a bool.
+func BoolValue(v bool) Value {
+ return slog.BoolValue(v)
+}
+
+// TimeValue returns a Value for a time.Time.
+// It discards the monotonic portion.
+func TimeValue(v time.Time) Value {
+ return slog.TimeValue(v)
+}
+
+// DurationValue returns a Value for a time.Duration.
+func DurationValue(v time.Duration) Value {
+ return slog.DurationValue(v)
+}
+
+// GroupValue returns a new Value for a list of Attrs.
+// The caller must not subsequently mutate the argument slice.
+func GroupValue(as ...Attr) Value {
+ return slog.GroupValue(as...)
+}
+
+// AnyValue returns a Value for the supplied value.
+//
+// If the supplied value is of type Value, it is returned
+// unmodified.
+//
+// Given a value of one of Go's predeclared string, bool, or
+// (non-complex) numeric types, AnyValue returns a Value of kind
+// String, Bool, Uint64, Int64, or Float64. The width of the
+// original numeric type is not preserved.
+//
+// Given a time.Time or time.Duration value, AnyValue returns a Value of kind
+// KindTime or KindDuration. The monotonic time is not preserved.
+//
+// For nil, or values of all other types, including named types whose
+// underlying type is numeric, AnyValue returns a value of kind KindAny.
+func AnyValue(v any) Value {
+ return slog.AnyValue(v)
+}
+
+// A LogValuer is any Go value that can convert itself into a Value for logging.
+//
+// This mechanism may be used to defer expensive operations until they are
+// needed, or to expand a single value into a sequence of components.
+type LogValuer = slog.LogValuer
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
new file mode 100644
index 0000000..1fb13ab
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -0,0 +1,4 @@
+logrus
+vendor
+
+.idea/
diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml
new file mode 100644
index 0000000..65dc285
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.golangci.yml
@@ -0,0 +1,40 @@
+run:
+ # do not run on test files yet
+ tests: false
+
+# all available settings of specific linters
+linters-settings:
+ errcheck:
+    # report about not checking of errors in type assertions: `a := b.(MyStruct)`;
+ # default is false: such cases aren't reported by default.
+ check-type-assertions: false
+
+ # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
+ # default is false: such cases aren't reported by default.
+ check-blank: false
+
+ lll:
+ line-length: 100
+ tab-width: 4
+
+ prealloc:
+ simple: false
+ range-loops: false
+ for-loops: false
+
+ whitespace:
+ multi-if: false # Enforces newlines (or comments) after every multi-line if statement
+ multi-func: false # Enforces newlines (or comments) after every multi-line function signature
+
+linters:
+ enable:
+ - megacheck
+ - govet
+ disable:
+ - maligned
+ - prealloc
+ disable-all: false
+ presets:
+ - bugs
+ - unused
+ fast: false
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
new file mode 100644
index 0000000..c1dbd5a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go_import_path: github.com/sirupsen/logrus
+git:
+ depth: 1
+env:
+ - GO111MODULE=on
+go: 1.15.x
+os: linux
+install:
+ - ./travis/install.sh
+script:
+ - cd ci
+ - go run mage.go -v -w ../ crossBuild
+ - go run mage.go -v -w ../ lint
+ - go run mage.go -v -w ../ test
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..7567f61
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,259 @@
+# 1.8.1
+Code quality:
+ * move the magefile into its own subdir/submodule to remove the magefile dependency on logrus consumers
+ * improve timestamp format documentation
+
+Fixes:
+ * fix race condition on logger hooks
+
+
+# 1.8.0
+
+Corrects the version number, replacing v1.7.1.
+
+# 1.7.1
+
+Beware: this release introduced a new public API, and its semver is therefore incorrect.
+
+Code quality:
+ * use go 1.15 in travis
+ * use magefile as task runner
+
+Fixes:
+ * small fixes about new go 1.13 error formatting system
+ * Fix for a long-standing race condition with mutating data hooks
+
+Features:
+ * build support for zos
+
+# 1.7.0
+Fixes:
+ * the dependency toward a windows terminal library has been removed
+
+Features:
+ * a new buffer pool management API has been added
+ * a set of `Fn()` functions have been added
+
+# 1.6.0
+Fixes:
+ * end of line cleanup
+ * revert the entry concurrency bug fix which leads to deadlock under some circumstances
+ * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14
+
+Features:
+ * add an option to the `TextFormatter` to completely disable fields quoting
+
+# 1.5.0
+Code quality:
+ * add golangci linter run on travis
+
+Fixes:
+ * add mutex for hooks concurrent access on `Entry` data
+ * caller function field for go1.14
+ * fix build issue for gopherjs target
+
+Feature:
+ * add a hooks/writer sub-package whose goal is to split output to different streams depending on the trace level
+ * add a `DisableHTMLEscape` option in the `JSONFormatter`
+ * add `ForceQuote` and `PadLevelText` options in the `TextFormatter`
+
+# 1.4.2
+ * Fixes build break for plan9, nacl, solaris
+# 1.4.1
+This new release introduces:
+ * Enhance TextFormatter to not print caller information when it is empty (#944)
+ * Remove dependency on golang.org/x/crypto (#932, #943)
+
+Fixes:
+ * Fix Entry.WithContext method to return a copy of the initial entry (#941)
+
+# 1.4.0
+This new release introduces:
+ * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
+ * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
+ * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
+
+Fixes:
+ * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
+ * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
+ * Fix infinite recursion on unknown `Level.String()` (#907)
+ * Fix race condition in `getCaller` (#916).
+
+
+# 1.3.0
+This new release introduces:
+ * Log, Logf, Logln functions for Logger and Entry that take a Level
+
+Fixes:
+ * Building prometheus node_exporter on AIX (#840)
+ * Race condition in TextFormatter (#468)
+ * Travis CI import path (#868)
+ * Remove coloured output on Windows (#862)
+ * Pointer to func as field in JSONFormatter (#870)
+ * Properly marshal Levels (#873)
+
+# 1.2.0
+This new release introduces:
+ * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
+ * A new trace level named `Trace` whose level is below `Debug`
+ * A configurable exit function to be called upon a Fatal trace
+ * The `Level` object now implements `encoding.TextUnmarshaler` interface
+
+# 1.1.1
+This is a bug fix release.
+ * fix the build break on Solaris
+ * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized
+
+# 1.1.0
+This new release introduces:
+ * several fixes:
+ * a fix for a race condition on entry formatting
+ * proper cleanup of previously used entries before putting them back in the pool
+ * the extra new line at the end of message in text formatter has been removed
+ * a new global public API to check if a level is activated: IsLevelEnabled
+ * the following methods have been added to the Logger object
+ * IsLevelEnabled
+ * SetFormatter
+ * SetOutput
+ * ReplaceHooks
+ * introduction of go module
+ * an indent configuration for the json formatter
+ * output colour support for windows
+ * the field sort function is now configurable for text formatter
+ * the CLICOLOR and CLICOLOR\_FORCE environment variable support in the text formatter
+
+# 1.0.6
+
+This new release introduces:
+ * a new API, WithTime, which allows easily forcing the time of the log entry,
+ which is mostly useful for logger wrappers
+ * a fix reverting the immutability of the entry given as parameter to the hooks
+ * a new configuration field of the JSON formatter in order to put all the fields
+ in a nested dictionary
+ * a new SetOutput method in the Logger
+ * a new configuration of the text formatter to configure the names of the default keys
+ * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
new file mode 100644
index 0000000..d1d4a85
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -0,0 +1,515 @@
+# Logrus [](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [](https://travis-ci.org/sirupsen/logrus) [](https://pkg.go.dev/github.com/sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger.
+
+**Logrus is in maintenance-mode.** We will not be introducing new features. It's
+simply too hard to do in a way that won't break many people's projects, which is
+the last thing you want from your logging library (again...).
+
+This does not mean Logrus is dead. Logrus will continue to be maintained for
+security, (backwards compatible) bug fixes, and performance (where we are
+limited by the interface).
+
+I believe Logrus' biggest contribution is to have played a part in today's
+widespread use of structured logging in Golang. There doesn't seem to be a
+reason to do a major, breaking iteration into Logrus V2, since the Go
+community has built those independently. Many fantastic alternatives have sprung
+up. Logrus would look like those, had it been re-designed with what we know
+about structured logging in Go today. Check out, for example,
+[Zerolog][zerolog], [Zap][zap], and [Apex][apex].
+
+[zerolog]: https://github.com/rs/zerolog
+[zap]: https://github.com/uber-go/zap
+[apex]: https://github.com/apex/log
+
+**Seeing weird case-sensitive problems?** In the past it was possible to
+import Logrus as both upper- and lower-case. Due to the Go package environment,
+this caused issues in the community and we needed a standard. Some environments
+experienced problems with the upper-case variant, so the lower-case was chosen.
+Everything using `logrus` will need to use the lower-case:
+`github.com/sirupsen/logrus`. Any package that isn't should be changed.
+
+To fix Glide, see [these
+comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
+For an in-depth explanation of the casing issue, see [this
+comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```text
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+```
+To ensure this behaviour even if a TTY is attached, set your formatter as follows:
+
+```go
+ log.SetFormatter(&log.TextFormatter{
+ DisableColors: true,
+ FullTimestamp: true,
+ })
+```
+
+#### Logging Method Name
+
+If you wish to add the calling method as a field, instruct the logger via:
+```go
+log.SetReportCaller(true)
+```
+This adds the caller as 'method' like so:
+
+```json
+{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
+"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
+```
+
+```text
+time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
+```
+Note that this does add measurable overhead - the cost will depend on the version of Go, but is
+between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
+environment via benchmarks:
+```
+go test -bench=.*CallerTracing
+```
+
+
+#### Case-sensitivity
+
+The organization's name was changed to lower-case, and this will not be changed
+back. If you are getting import conflicts due to case sensitivity, please use
+the lower-case import: `github.com/sirupsen/logrus`.
+
+#### Example
+
+The simplest way to use Logrus is the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/sirupsen/logrus"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Output to stdout instead of the default stderr
+ // Can be any io.Writer, see below for File example
+ log.SetOutput(os.Stdout)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+ "os"
+ "github.com/sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different from the package-level
+ // exported logger. See Godoc.
+ log.Out = os.Stdout
+
+ // You could set this to any `io.Writer` such as a file
+ // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+ // if err == nil {
+ // log.Out = file
+ // } else {
+ // log.Info("Failed to log to file, using default stderr")
+ // }
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field; however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
+#### Hooks
+
+You can add hooks for logging levels. For example, you can send errors to an
+exception tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD,
+or log to multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+ logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+Note: The syslog hook also supports connecting to a local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks).
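+
+If none of the built-in or service hooks fit, writing one is small: implement
+`Levels()` and `Fire()`. The following is an illustrative sketch (the
+`ChannelHook` type is hypothetical, not part of Logrus) that forwards
+error-and-above entries to a channel without blocking the logger:
+
+```go
+import "github.com/sirupsen/logrus"
+
+// ChannelHook forwards matching entries to a channel.
+type ChannelHook struct {
+	C chan *logrus.Entry
+}
+
+// Levels reports the levels this hook fires on.
+func (h *ChannelHook) Levels() []logrus.Level {
+	return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
+}
+
+// Fire is called synchronously by Logrus with the entry being logged.
+func (h *ChannelHook) Fire(entry *logrus.Entry) error {
+	select {
+	case h.C <- entry.Dup(): // copy the entry; drop it if the consumer lags
+	default:
+	}
+	return nil
+}
+```
+
+It would be registered with `logrus.AddHook(&ChannelHook{C: make(chan *logrus.Entry, 64)})`.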
+
+
+#### Level logging
+
+Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Trace("Something very low level.")
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging).
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment, you
+could do:
+
+```go
+import (
+ log "github.com/sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY set the
+ `DisableColors` field to `true`. For Windows, see
+ [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+ * When colors are enabled, levels are truncated to 4 characters by default. To disable
+ truncation set the `DisableLevelTruncation` field to `true`.
+ * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text.
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
+
+Third party logging formatters:
+
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo.
+* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
+* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). Gets the file name, the log's line number and the calling function's name when printing a log; saves logs to files.
+* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). Logrus's JSON message formatter with a human-readable caption added.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+import (
+  "encoding/json"
+  "fmt"
+
+  log "github.com/sirupsen/logrus"
+)
+
+type MyJSONFormatter struct{}
+
+func (f *MyJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("failed to marshal fields to JSON: %w", err)
+  }
+  return append(serialized, '\n'), nil
+}
+
+func init() {
+  log.SetFormatter(new(MyJSONFormatter))
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers; you can initialize the logger's level, hook and formatter from a config file, and the logger will be generated with different configs in different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs! It also simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+import (
+ "github.com/sirupsen/logrus"
+ "github.com/sirupsen/logrus/hooks/test"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestSomething(t *testing.T) {
+ logger, hook := test.NewNullLogger()
+ logger.Error("Helloerror")
+
+ assert.Equal(t, 1, len(hook.Entries))
+ assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal(t, "Helloerror", hook.LastEntry().Message)
+
+ hook.Reset()
+ assert.Nil(t, hook.LastEntry())
+}
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```go
+...
+handler := func() {
+ // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call `logger.SetNoLock()` to disable it.
+
+Situations where locking is not needed include:
+
+* You have no hooks registered, or hook calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+ 1) logger.Out is protected by locks.
+
+ 2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-threaded/multi-process writing.)
+
+ (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
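+
+As an illustrative sketch of the append-only-file case above (the file name is
+hypothetical, and the usual `os` and logrus imports are assumed):
+
+```go
+logger := logrus.New()
+
+// Writes under ~4k to an O_APPEND file are atomic on most platforms,
+// so the logger's mutex is redundant for output.
+f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+if err != nil {
+	logger.Fatal(err)
+}
+logger.SetOutput(f)
+
+// Safe only because no hooks are registered and Out is safe for concurrent writes.
+logger.SetNoLock()
+```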
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..8fd189e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,76 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+ "fmt"
+ "os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+ defer func() {
+ if err := recover(); err != nil {
+ fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+ }
+ }()
+
+ handler()
+}
+
+func runHandlers() {
+ for _, handler := range handlers {
+ runHandler(handler)
+ }
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+ runHandlers()
+ os.Exit(code)
+}
+
+// RegisterExitHandler appends a Logrus Exit handler to the list of handlers,
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+ handlers = append(handlers, handler)
+}
+
+// DeferExitHandler prepends a Logrus Exit handler to the list of handlers,
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func DeferExitHandler(handler func()) {
+ handlers = append([]func(){handler}, handlers...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
new file mode 100644
index 0000000..df9d65c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -0,0 +1,14 @@
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+ GOPATH: c:\gopath
+branches:
+ only:
+ - master
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - go version
+build_script:
+ - go get -t
+ - go test
diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go
new file mode 100644
index 0000000..c7787f7
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go
@@ -0,0 +1,43 @@
+package logrus
+
+import (
+ "bytes"
+ "sync"
+)
+
+var (
+ bufferPool BufferPool
+)
+
+type BufferPool interface {
+ Put(*bytes.Buffer)
+ Get() *bytes.Buffer
+}
+
+type defaultPool struct {
+ pool *sync.Pool
+}
+
+func (p *defaultPool) Put(buf *bytes.Buffer) {
+ p.pool.Put(buf)
+}
+
+func (p *defaultPool) Get() *bytes.Buffer {
+ return p.pool.Get().(*bytes.Buffer)
+}
+
+// SetBufferPool allows replacing the default logrus buffer pool
+// to better meet the specific needs of an application.
+func SetBufferPool(bp BufferPool) {
+ bufferPool = bp
+}
+
+func init() {
+ SetBufferPool(&defaultPool{
+ pool: &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ },
+ })
+}
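+
+// An illustrative sketch (not part of upstream logrus) of a custom BufferPool
+// that refuses to recycle oversized buffers, bounding the memory retained by
+// the pool:
+//
+//	type cappedPool struct {
+//		pool sync.Pool // New must return *bytes.Buffer
+//		max  int       // largest capacity worth keeping
+//	}
+//
+//	func (p *cappedPool) Get() *bytes.Buffer { return p.pool.Get().(*bytes.Buffer) }
+//
+//	func (p *cappedPool) Put(buf *bytes.Buffer) {
+//		if buf.Cap() <= p.max {
+//			p.pool.Put(buf)
+//		}
+//	}
+//
+// It would be registered with SetBufferPool(&cappedPool{...}).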
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 0000000..da67aba
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
new file mode 100644
index 0000000..71cdbbc
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -0,0 +1,442 @@
+package logrus
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+
+ // qualified package name, cached at first use
+ logrusPackage string
+
+ // Positions in the call stack when tracing to report the calling method
+ minimumCallerDepth int
+
+ // Used for caller information initialisation
+ callerInitOnce sync.Once
+)
+
+const (
+ maximumCallerDepth int = 25
+ knownLogrusFrames int = 4
+)
+
+func init() {
+ // start at the bottom of the stack before the package-name cache is primed
+ minimumCallerDepth = 1
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
+// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
+// reused and passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
+ // This field will be set on entry firing and the value will be equal to the one in Logger struct field.
+ Level Level
+
+ // Calling method, with package name
+ Caller *runtime.Frame
+
+ // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+
+ // When formatter is called in entry.log(), a Buffer may be set to entry
+ Buffer *bytes.Buffer
+
+ // Contains the context set by the user. Useful for hook processing etc.
+ Context context.Context
+
+ // err may contain a field formatting error
+ err string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, plus one optional. Give a little extra room.
+ Data: make(Fields, 6),
+ }
+}
+
+func (entry *Entry) Dup() *Entry {
+ data := make(Fields, len(entry.Data))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err}
+}
+
+// Returns the bytes representation of this entry from the formatter.
+func (entry *Entry) Bytes() ([]byte, error) {
+ return entry.Logger.Formatter.Format(entry)
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ serialized, err := entry.Bytes()
+ if err != nil {
+ return "", err
+ }
+ str := string(serialized)
+ return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a context to the Entry.
+func (entry *Entry) WithContext(ctx context.Context) *Entry {
+ dataCopy := make(Fields, len(entry.Data))
+ for k, v := range entry.Data {
+ dataCopy[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx}
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := make(Fields, len(entry.Data)+len(fields))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ fieldErr := entry.err
+ for k, v := range fields {
+ isErrField := false
+ if t := reflect.TypeOf(v); t != nil {
+ switch {
+ case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func:
+ isErrField = true
+ }
+ }
+ if isErrField {
+ tmp := fmt.Sprintf("can not add field %q", k)
+ if fieldErr != "" {
+ fieldErr = entry.err + ", " + tmp
+ } else {
+ fieldErr = tmp
+ }
+ } else {
+ data[k] = v
+ }
+ }
+ return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
+}
+
+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+ dataCopy := make(Fields, len(entry.Data))
+ for k, v := range entry.Data {
+ dataCopy[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context}
+}
+
+// getPackageName reduces a fully qualified function name to the package name.
+// There really ought to be a better way...
+func getPackageName(f string) string {
+ for {
+ lastPeriod := strings.LastIndex(f, ".")
+ lastSlash := strings.LastIndex(f, "/")
+ if lastPeriod > lastSlash {
+ f = f[:lastPeriod]
+ } else {
+ break
+ }
+ }
+
+ return f
+}
+
+// getCaller retrieves the name of the first non-logrus calling function
+func getCaller() *runtime.Frame {
+ // cache this package's fully-qualified name
+ callerInitOnce.Do(func() {
+ pcs := make([]uintptr, maximumCallerDepth)
+ _ = runtime.Callers(0, pcs)
+
+ // dynamically get the package name and the minimum caller depth
+ for i := 0; i < maximumCallerDepth; i++ {
+ funcName := runtime.FuncForPC(pcs[i]).Name()
+ if strings.Contains(funcName, "getCaller") {
+ logrusPackage = getPackageName(funcName)
+ break
+ }
+ }
+
+ minimumCallerDepth = knownLogrusFrames
+ })
+
+ // Restrict the lookback frames to avoid runaway lookups
+ pcs := make([]uintptr, maximumCallerDepth)
+ depth := runtime.Callers(minimumCallerDepth, pcs)
+ frames := runtime.CallersFrames(pcs[:depth])
+
+ for f, again := frames.Next(); again; f, again = frames.Next() {
+ pkg := getPackageName(f.Function)
+
+ // If the caller isn't part of this package, we're done
+ if pkg != logrusPackage {
+ return &f //nolint:scopelint
+ }
+ }
+
+ // if we got here, we failed to find the caller's context
+ return nil
+}
+
+func (entry Entry) HasCaller() (has bool) {
+ return entry.Logger != nil &&
+ entry.Logger.ReportCaller &&
+ entry.Caller != nil
+}
+
+func (entry *Entry) log(level Level, msg string) {
+ var buffer *bytes.Buffer
+
+ newEntry := entry.Dup()
+
+ if newEntry.Time.IsZero() {
+ newEntry.Time = time.Now()
+ }
+
+ newEntry.Level = level
+ newEntry.Message = msg
+
+ newEntry.Logger.mu.Lock()
+ reportCaller := newEntry.Logger.ReportCaller
+ bufPool := newEntry.getBufferPool()
+ newEntry.Logger.mu.Unlock()
+
+ if reportCaller {
+ newEntry.Caller = getCaller()
+ }
+
+ newEntry.fireHooks()
+ buffer = bufPool.Get()
+ defer func() {
+ newEntry.Buffer = nil
+ buffer.Reset()
+ bufPool.Put(buffer)
+ }()
+ buffer.Reset()
+ newEntry.Buffer = buffer
+
+ newEntry.write()
+
+ newEntry.Buffer = nil
+
+ // To avoid Entry#log() returning a value that would only make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(newEntry)
+ }
+}
+
+func (entry *Entry) getBufferPool() (pool BufferPool) {
+ if entry.Logger.BufferPool != nil {
+ return entry.Logger.BufferPool
+ }
+ return bufferPool
+}
+
+func (entry *Entry) fireHooks() {
+ var tmpHooks LevelHooks
+ entry.Logger.mu.Lock()
+ tmpHooks = make(LevelHooks, len(entry.Logger.Hooks))
+ for k, v := range entry.Logger.Hooks {
+ tmpHooks[k] = v
+ }
+ entry.Logger.mu.Unlock()
+
+ err := tmpHooks.Fire(entry.Level, entry)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ }
+}
+
+func (entry *Entry) write() {
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ return
+ }
+ if _, err := entry.Logger.Out.Write(serialized); err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+}
+
+// Log will log a message at the level given as parameter.
+// Warning: using Log at Panic or Fatal level will not panic or exit, respectively.
+// For this behaviour Entry.Panic or Entry.Fatal should be used instead.
+func (entry *Entry) Log(level Level, args ...interface{}) {
+ if entry.Logger.IsLevelEnabled(level) {
+ entry.log(level, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Trace(args ...interface{}) {
+ entry.Log(TraceLevel, args...)
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ entry.Log(DebugLevel, args...)
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ entry.Log(InfoLevel, args...)
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ entry.Log(WarnLevel, args...)
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ entry.Log(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ entry.Log(FatalLevel, args...)
+ entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ entry.Log(PanicLevel, args...)
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
+ if entry.Logger.IsLevelEnabled(level) {
+ entry.Log(level, fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Tracef(format string, args ...interface{}) {
+ entry.Logf(TraceLevel, format, args...)
+}
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ entry.Logf(DebugLevel, format, args...)
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ entry.Logf(InfoLevel, format, args...)
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ entry.Logf(WarnLevel, format, args...)
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ entry.Logf(ErrorLevel, format, args...)
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ entry.Logf(FatalLevel, format, args...)
+ entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ entry.Logf(PanicLevel, format, args...)
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Logln(level Level, args ...interface{}) {
+ if entry.Logger.IsLevelEnabled(level) {
+ entry.Log(level, entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Traceln(args ...interface{}) {
+ entry.Logln(TraceLevel, args...)
+}
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ entry.Logln(DebugLevel, args...)
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ entry.Logln(InfoLevel, args...)
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ entry.Logln(WarnLevel, args...)
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ entry.Logln(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ entry.Logln(FatalLevel, args...)
+ entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ entry.Logln(PanicLevel, args...)
+}
+
+// sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
new file mode 100644
index 0000000..017c30c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -0,0 +1,270 @@
+package logrus
+
+import (
+ "context"
+ "io"
+ "time"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.SetOutput(out)
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.SetFormatter(formatter)
+}
+
+// SetReportCaller sets whether the standard logger will include the calling
+// method as a field.
+func SetReportCaller(include bool) {
+ std.SetReportCaller(include)
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.SetLevel(level)
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ return std.GetLevel()
+}
+
+// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
+func IsLevelEnabled(level Level) bool {
+ return std.IsLevelEnabled(level)
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.AddHook(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
+// WithContext creates an entry from the standard logger and adds a context to it.
+func WithContext(ctx context.Context) *Entry {
+ return std.WithContext(ctx)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// WithTime creates an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithTime(t time.Time) *Entry {
+ return std.WithTime(t)
+}
+
+// Trace logs a message at level Trace on the standard logger.
+func Trace(args ...interface{}) {
+ std.Trace(args...)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// TraceFn logs a message from a func at level Trace on the standard logger.
+func TraceFn(fn LogFunction) {
+ std.TraceFn(fn)
+}
+
+// DebugFn logs a message from a func at level Debug on the standard logger.
+func DebugFn(fn LogFunction) {
+ std.DebugFn(fn)
+}
+
+// PrintFn logs a message from a func at level Info on the standard logger.
+func PrintFn(fn LogFunction) {
+ std.PrintFn(fn)
+}
+
+// InfoFn logs a message from a func at level Info on the standard logger.
+func InfoFn(fn LogFunction) {
+ std.InfoFn(fn)
+}
+
+// WarnFn logs a message from a func at level Warn on the standard logger.
+func WarnFn(fn LogFunction) {
+ std.WarnFn(fn)
+}
+
+// WarningFn logs a message from a func at level Warn on the standard logger.
+func WarningFn(fn LogFunction) {
+ std.WarningFn(fn)
+}
+
+// ErrorFn logs a message from a func at level Error on the standard logger.
+func ErrorFn(fn LogFunction) {
+ std.ErrorFn(fn)
+}
+
+// PanicFn logs a message from a func at level Panic on the standard logger.
+func PanicFn(fn LogFunction) {
+ std.PanicFn(fn)
+}
+
+// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1.
+func FatalFn(fn LogFunction) {
+ std.FatalFn(fn)
+}
+
+// Tracef logs a message at level Trace on the standard logger.
+func Tracef(format string, args ...interface{}) {
+ std.Tracef(format, args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Traceln logs a message at level Trace on the standard logger.
+func Traceln(args ...interface{}) {
+ std.Traceln(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..4088837
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -0,0 +1,78 @@
+package logrus
+
+import "time"
+
+// Default key names for the default fields
+const (
+ defaultTimestampFormat = time.RFC3339
+ FieldKeyMsg = "msg"
+ FieldKeyLevel = "level"
+ FieldKeyTime = "time"
+ FieldKeyLogrusError = "logrus_error"
+ FieldKeyFunc = "func"
+ FieldKeyFile = "file"
+)
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error...
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
+// dumping them. If this code wasn't there, doing:
+//
+// logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
+ timeKey := fieldMap.resolve(FieldKeyTime)
+ if t, ok := data[timeKey]; ok {
+ data["fields."+timeKey] = t
+ delete(data, timeKey)
+ }
+
+ msgKey := fieldMap.resolve(FieldKeyMsg)
+ if m, ok := data[msgKey]; ok {
+ data["fields."+msgKey] = m
+ delete(data, msgKey)
+ }
+
+ levelKey := fieldMap.resolve(FieldKeyLevel)
+ if l, ok := data[levelKey]; ok {
+ data["fields."+levelKey] = l
+ delete(data, levelKey)
+ }
+
+ logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+ if l, ok := data[logrusErrKey]; ok {
+ data["fields."+logrusErrKey] = l
+ delete(data, logrusErrKey)
+ }
+
+ // If reportCaller is not set, 'func' will not conflict.
+ if reportCaller {
+ funcKey := fieldMap.resolve(FieldKeyFunc)
+ if l, ok := data[funcKey]; ok {
+ data["fields."+funcKey] = l
+ }
+ fileKey := fieldMap.resolve(FieldKeyFile)
+ if l, ok := data[fileKey]; ok {
+ data["fields."+fileKey] = l
+ }
+ }
+}
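Since the Formatter contract above is just `Format(*Entry) ([]byte, error)`, plugging in a custom formatter is small. A minimal sketch, assuming nothing beyond the interface itself; the `kvFormatter` name and its output layout are illustrative, not part of logrus:

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

// kvFormatter is a hypothetical formatter rendering "level message key=value" lines.
// Note: Go map iteration order is not deterministic, so field order may vary.
type kvFormatter struct{}

func (kvFormatter) Format(entry *log.Entry) ([]byte, error) {
	line := fmt.Sprintf("%s %s", entry.Level, entry.Message)
	for k, v := range entry.Data {
		line += fmt.Sprintf(" %s=%v", k, v)
	}
	return []byte(line + "\n"), nil
}

func main() {
	logger := log.New()
	logger.SetFormatter(kvFormatter{})
	logger.WithField("user", "alice").Info("login ok") // info login ok user=alice
}
```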
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A Hook is fired when logging at any of the levels returned from `Levels()`
+// on your implementation of the interface. Note that hooks are not fired in a
+// goroutine or via a worker channel: if your hook blocks and you don't want
+// logging calls at those levels to block with it, you must handle that
+// concurrency yourself.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
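To illustrate the interface, a minimal sketch of a hook that counts error-and-above entries; the `errorCounter` type is hypothetical, not part of logrus, and Fire runs synchronously on the calling goroutine as noted above:

```go
package main

import (
	"fmt"
	"sync/atomic"

	log "github.com/sirupsen/logrus"
)

// errorCounter counts entries logged at Error level and above.
type errorCounter struct{ n int64 }

// Levels restricts the hook to error, fatal and panic entries.
func (c *errorCounter) Levels() []log.Level {
	return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

// Fire is called synchronously for every matching entry.
func (c *errorCounter) Fire(e *log.Entry) error {
	atomic.AddInt64(&c.n, 1)
	return nil
}

func main() {
	logger := log.New()
	counter := &errorCounter{}
	logger.AddHook(counter)

	logger.Error("boom")
	fmt.Println(atomic.LoadInt64(&counter.n)) // 1
}
```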
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..c96dc56
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -0,0 +1,128 @@
+package logrus
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "runtime"
+)
+
+type fieldKey string
+
+// FieldMap allows customization of the key names for default fields.
+type FieldMap map[fieldKey]string
+
+func (f FieldMap) resolve(key fieldKey) string {
+ if k, ok := f[key]; ok {
+ return k
+ }
+
+ return string(key)
+}
+
+// JSONFormatter formats logs into parsable JSON.
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ // The format is the same as for time.Format or time.Parse from the
+ // standard library, which already provides a set of predefined formats.
+ TimestampFormat string
+
+ // DisableTimestamp allows disabling automatic timestamps in output
+ DisableTimestamp bool
+
+ // DisableHTMLEscape allows disabling HTML escaping in output
+ DisableHTMLEscape bool
+
+ // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+ DataKey string
+
+ // FieldMap allows users to customize the names of keys for default fields.
+ // As an example:
+ // formatter := &JSONFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message",
+ // FieldKeyFunc: "@caller",
+ // },
+ // }
+ FieldMap FieldMap
+
+ // CallerPrettyfier can be set by the user to modify the content
+ // of the function and file keys in the JSON data when ReportCaller is
+ // activated. If either returned value is the empty string, the
+ // corresponding key is removed from the JSON fields.
+ CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+ // PrettyPrint will indent all json logs
+ PrettyPrint bool
+}
+
+// Format renders a single log entry
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+4)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+
+ if f.DataKey != "" {
+ newData := make(Fields, 4)
+ newData[f.DataKey] = data
+ data = newData
+ }
+
+ prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = defaultTimestampFormat
+ }
+
+ if entry.err != "" {
+ data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+ }
+ if !f.DisableTimestamp {
+ data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+ }
+ data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+ data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+ if entry.HasCaller() {
+ funcVal := entry.Caller.Function
+ fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+ if f.CallerPrettyfier != nil {
+ funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+ }
+ if funcVal != "" {
+ data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
+ }
+ if fileVal != "" {
+ data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
+ }
+ }
+
+ var b *bytes.Buffer
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ encoder := json.NewEncoder(b)
+ encoder.SetEscapeHTML(!f.DisableHTMLEscape)
+ if f.PrettyPrint {
+ encoder.SetIndent("", " ")
+ }
+ if err := encoder.Encode(data); err != nil {
+ return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err)
+ }
+
+ return b.Bytes(), nil
+}
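A short usage sketch for the formatter above; the `@timestamp`/`@message` key names are arbitrary choices for illustration, not defaults:

```go
package main

import (
	"os"
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.SetOutput(os.Stdout)
	logger.SetFormatter(&log.JSONFormatter{
		TimestampFormat: time.RFC3339Nano,
		FieldMap: log.FieldMap{
			log.FieldKeyTime: "@timestamp",
			log.FieldKeyMsg:  "@message",
		},
	})

	// Emits something like:
	// {"@message":"login ok","@timestamp":"2024-...","level":"info","user":"alice"}
	logger.WithField("user", "alice").Info("login ok")
}
```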
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 0000000..5ff0aef
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -0,0 +1,417 @@
+package logrus
+
+import (
+ "context"
+ "io"
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// LogFunction is used for expensive messages: it can be more efficient to pass
+// a function and only call it if the log level is actually enabled, rather than
+// generating the log message first and then checking whether the level is enabled.
+type LogFunction func() []interface{}
+
+type Logger struct {
+ // Formatted log entries are written to this writer, guarded by a mutex. It's
+ // common to set this to a file, or leave it as the default, `os.Stderr`. You
+ // can also set this to something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before being logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+
+ // Flag for whether to log caller info (off by default)
+ ReportCaller bool
+
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged.
+ Level Level
+ // Used to synchronize writing to the log. Locking is enabled by default.
+ mu MutexWrap
+ // Reusable empty entry
+ entryPool sync.Pool
+ // Function to exit the application, defaults to `os.Exit()`
+ ExitFunc exitFunc
+ // The buffer pool used to format the log. If it is nil, the default global
+ // buffer pool will be used.
+ BufferPool BufferPool
+}
+
+type exitFunc func(int)
+
+type MutexWrap struct {
+ lock sync.Mutex
+ disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+ if !mw.disabled {
+ mw.lock.Lock()
+ }
+}
+
+func (mw *MutexWrap) Unlock() {
+ if !mw.disabled {
+ mw.lock.Unlock()
+ }
+}
+
+func (mw *MutexWrap) Disable() {
+ mw.disabled = true
+}
+
+// New creates a new logger. Configuration can be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the returned instance. You can also just
+// instantiate your own:
+//
+// var log = &logrus.Logger{
+// Out: os.Stderr,
+// Formatter: new(logrus.TextFormatter),
+// Hooks: make(logrus.LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ ExitFunc: os.Exit,
+ ReportCaller: false,
+ }
+}
+
+func (logger *Logger) newEntry() *Entry {
+ entry, ok := logger.entryPool.Get().(*Entry)
+ if ok {
+ return entry
+ }
+ return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+ entry.Data = map[string]interface{}{}
+ logger.entryPool.Put(entry)
+}
+
+// WithField allocates a new entry and adds a field to it.
+// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to
+// this new returned entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithField(key, value)
+}
+
+// WithFields adds a map of fields to the log entry. All it does is call
+// `WithField` for each key/value pair.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithError(err)
+}
+
+// Add a context to the log entry.
+func (logger *Logger) WithContext(ctx context.Context) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithContext(ctx)
+}
+
+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithTime(t)
+}
+
+func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
+ if logger.IsLevelEnabled(level) {
+ entry := logger.newEntry()
+ entry.Logf(level, format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Tracef(format string, args ...interface{}) {
+ logger.Logf(TraceLevel, format, args...)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ logger.Logf(DebugLevel, format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ logger.Logf(InfoLevel, format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Printf(format, args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ logger.Logf(WarnLevel, format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ logger.Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ logger.Logf(ErrorLevel, format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ logger.Logf(FatalLevel, format, args...)
+ logger.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ logger.Logf(PanicLevel, format, args...)
+}
+
+// Log will log a message at the level given as parameter.
+// Warning: calling Log at Panic or Fatal level will neither panic nor exit the
+// process. For that behaviour, Logger.Panic or Logger.Fatal should be used instead.
+func (logger *Logger) Log(level Level, args ...interface{}) {
+ if logger.IsLevelEnabled(level) {
+ entry := logger.newEntry()
+ entry.Log(level, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) LogFn(level Level, fn LogFunction) {
+ if logger.IsLevelEnabled(level) {
+ entry := logger.newEntry()
+ entry.Log(level, fn()...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Trace(args ...interface{}) {
+ logger.Log(TraceLevel, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ logger.Log(DebugLevel, args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ logger.Log(InfoLevel, args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Print(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ logger.Log(WarnLevel, args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ logger.Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ logger.Log(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ logger.Log(FatalLevel, args...)
+ logger.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ logger.Log(PanicLevel, args...)
+}
+
+func (logger *Logger) TraceFn(fn LogFunction) {
+ logger.LogFn(TraceLevel, fn)
+}
+
+func (logger *Logger) DebugFn(fn LogFunction) {
+ logger.LogFn(DebugLevel, fn)
+}
+
+func (logger *Logger) InfoFn(fn LogFunction) {
+ logger.LogFn(InfoLevel, fn)
+}
+
+func (logger *Logger) PrintFn(fn LogFunction) {
+ entry := logger.newEntry()
+ entry.Print(fn()...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) WarnFn(fn LogFunction) {
+ logger.LogFn(WarnLevel, fn)
+}
+
+func (logger *Logger) WarningFn(fn LogFunction) {
+ logger.WarnFn(fn)
+}
+
+func (logger *Logger) ErrorFn(fn LogFunction) {
+ logger.LogFn(ErrorLevel, fn)
+}
+
+func (logger *Logger) FatalFn(fn LogFunction) {
+ logger.LogFn(FatalLevel, fn)
+ logger.Exit(1)
+}
+
+func (logger *Logger) PanicFn(fn LogFunction) {
+ logger.LogFn(PanicLevel, fn)
+}
+
+func (logger *Logger) Logln(level Level, args ...interface{}) {
+ if logger.IsLevelEnabled(level) {
+ entry := logger.newEntry()
+ entry.Logln(level, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Traceln(args ...interface{}) {
+ logger.Logln(TraceLevel, args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ logger.Logln(DebugLevel, args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ logger.Logln(InfoLevel, args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Println(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ logger.Logln(WarnLevel, args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ logger.Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ logger.Logln(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ logger.Logln(FatalLevel, args...)
+ logger.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ logger.Logln(PanicLevel, args...)
+}
+
+func (logger *Logger) Exit(code int) {
+ runHandlers()
+ if logger.ExitFunc == nil {
+ logger.ExitFunc = os.Exit
+ }
+ logger.ExitFunc(code)
+}
+
+// When a file is opened in append mode, it's safe to write to it
+// concurrently (for messages under 4k on Linux). In such cases the
+// user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+ logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+ return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+// SetLevel sets the logger level.
+func (logger *Logger) SetLevel(level Level) {
+ atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
+
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+ return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
+func (logger *Logger) AddHook(hook Hook) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Hooks.Add(hook)
+}
+
+// IsLevelEnabled reports whether the logger's level is greater than or equal to the given level.
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+ return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter.
+func (logger *Logger) SetFormatter(formatter Formatter) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Formatter = formatter
+}
+
+// SetOutput sets the logger output.
+func (logger *Logger) SetOutput(output io.Writer) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Out = output
+}
+
+func (logger *Logger) SetReportCaller(reportCaller bool) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.ReportCaller = reportCaller
+}
+
+// ReplaceHooks replaces the logger hooks and returns the old ones
+func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
+ logger.mu.Lock()
+ oldHooks := logger.Hooks
+ logger.Hooks = hooks
+ logger.mu.Unlock()
+ return oldHooks
+}
+
+// SetBufferPool sets the logger buffer pool.
+func (logger *Logger) SetBufferPool(pool BufferPool) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.BufferPool = pool
+}
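Tying the pieces of this file together, a brief sketch of constructing and using a Logger. The struct-literal form mirrors the New documentation above; `computeSomething` is a hypothetical placeholder for an expensive computation:

```go
package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

// computeSomething stands in for an expensive computation (hypothetical).
func computeSomething() int { return 42 }

func main() {
	logger := &log.Logger{
		Out:       os.Stderr,
		Formatter: new(log.TextFormatter),
		Hooks:     make(log.LevelHooks),
		Level:     log.InfoLevel,
		ExitFunc:  os.Exit,
	}

	logger.SetLevel(log.DebugLevel) // stored atomically, safe for concurrent use

	if logger.IsLevelEnabled(log.DebugLevel) {
		logger.Debug("debugging enabled")
	}

	// DebugFn defers building the message until the level check has passed.
	logger.DebugFn(func() []interface{} {
		return []interface{}{"expensive value: ", computeSomething()}
	})
}
```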
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..2f16224
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -0,0 +1,186 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint32
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ if b, err := level.MarshalText(); err == nil {
+ return string(b)
+ } else {
+ return "unknown"
+ }
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch strings.ToLower(lvl) {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ case "trace":
+ return TraceLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (level *Level) UnmarshalText(text []byte) error {
+ l, err := ParseLevel(string(text))
+ if err != nil {
+ return err
+ }
+
+ *level = l
+
+ return nil
+}
+
+func (level Level) MarshalText() ([]byte, error) {
+ switch level {
+ case TraceLevel:
+ return []byte("trace"), nil
+ case DebugLevel:
+ return []byte("debug"), nil
+ case InfoLevel:
+ return []byte("info"), nil
+ case WarnLevel:
+ return []byte("warning"), nil
+ case ErrorLevel:
+ return []byte("error"), nil
+ case FatalLevel:
+ return []byte("fatal"), nil
+ case PanicLevel:
+ return []byte("panic"), nil
+ }
+
+ return nil, fmt.Errorf("not a valid logrus level %d", level)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+ TraceLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+ // TraceLevel level. Designates finer-grained informational events than the Debug.
+ TraceLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should accept, so that it
+// works with both a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+
+ // IsDebugEnabled() bool
+ // IsInfoEnabled() bool
+ // IsWarnEnabled() bool
+ // IsErrorEnabled() bool
+ // IsFatalEnabled() bool
+ // IsPanicEnabled() bool
+}
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous; it is
+// here for consistency. Do not use. Use Logger or Entry instead.
+type Ext1FieldLogger interface {
+ FieldLogger
+ Tracef(format string, args ...interface{})
+ Trace(args ...interface{})
+ Traceln(args ...interface{})
+}
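A quick sketch of the level round-trip defined above; note that WarnLevel marshals to "warning", not "warn", even though ParseLevel accepts both spellings:

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	lvl, err := log.ParseLevel("WARN") // ParseLevel is case-insensitive
	if err != nil {
		panic(err)
	}
	fmt.Println(lvl) // "warning", via MarshalText

	var viaText log.Level
	if err := viaText.UnmarshalText([]byte("trace")); err != nil {
		panic(err)
	}
	fmt.Println(viaText == log.TraceLevel) // true
}
```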
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
new file mode 100644
index 0000000..2403de9
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+ "io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ return true
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
new file mode 100644
index 0000000..4997899
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
@@ -0,0 +1,13 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+// +build !js
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TIOCGETA
+
+func isTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ return err == nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
new file mode 100644
index 0000000..ebdae3e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
@@ -0,0 +1,7 @@
+// +build js
+
+package logrus
+
+func isTerminal(fd int) bool {
+ return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
new file mode 100644
index 0000000..97af92c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
@@ -0,0 +1,11 @@
+// +build js nacl plan9
+
+package logrus
+
+import (
+ "io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
new file mode 100644
index 0000000..3293fb3
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
@@ -0,0 +1,17 @@
+// +build !appengine,!js,!windows,!nacl,!plan9
+
+package logrus
+
+import (
+ "io"
+ "os"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ switch v := w.(type) {
+ case *os.File:
+ return isTerminal(int(v.Fd()))
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
new file mode 100644
index 0000000..f6710b3
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
@@ -0,0 +1,11 @@
+package logrus
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// isTerminal returns true if the given file descriptor is a terminal.
+func isTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermio(fd, unix.TCGETA)
+ return err == nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
new file mode 100644
index 0000000..04748b8
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -0,0 +1,13 @@
+// +build linux aix zos
+// +build !js
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+
+func isTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ return err == nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
new file mode 100644
index 0000000..2879eb5
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -0,0 +1,27 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+ "io"
+ "os"
+
+ "golang.org/x/sys/windows"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ switch v := w.(type) {
+ case *os.File:
+ handle := windows.Handle(v.Fd())
+ var mode uint32
+ if err := windows.GetConsoleMode(handle, &mode); err != nil {
+ return false
+ }
+ mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ if err := windows.SetConsoleMode(handle, mode); err != nil {
+ return false
+ }
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..be2c6ef
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,339 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+)
+
+const (
+ red = 31
+ yellow = 33
+ blue = 36
+ gray = 37
+)
+
+var baseTimestamp time.Time
+
+func init() {
+ baseTimestamp = time.Now()
+}
+
+// TextFormatter formats logs into text
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+ // Force quoting of all values
+ ForceQuote bool
+
+ // DisableQuote disables quoting for all values.
+ // DisableQuote has lower priority than ForceQuote;
+ // if both are set to true, quoting is forced on all values.
+ DisableQuote bool
+
+ // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
+ EnvironmentOverrideColors bool
+
+ // Disable timestamp logging. Useful when output is redirected to a
+ // logging system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed.
+ // The format is the same as for time.Format or time.Parse from the
+ // standard library, which already provides a set of predefined formats.
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+
+ // The key sorting function; when nil, sort.Strings is used.
+ SortingFunc func([]string)
+
+ // Disables the truncation of the level text to 4 characters.
+ DisableLevelTruncation bool
+
+ // PadLevelText adds padding to the level text so that all levels output at the same length.
+ // PadLevelText is a superset of the DisableLevelTruncation option.
+ PadLevelText bool
+
+ // QuoteEmptyFields will wrap empty fields in quotes if true
+ QuoteEmptyFields bool
+
+ // Whether the logger's out is to a terminal
+ isTerminal bool
+
+ // FieldMap allows users to customize the names of keys for default fields.
+ // As an example:
+ // formatter := &TextFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message"}}
+ FieldMap FieldMap
+
+ // CallerPrettyfier can be set by the user to modify the content
+ // of the function and file keys in the data when ReportCaller is
+ // activated. If either returned value is the empty string, the
+ // corresponding key is removed from the fields.
+ CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+ terminalInitOnce sync.Once
+
+ // The max length of the level text, generated dynamically on init
+ levelTextMaxLength int
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+ if entry.Logger != nil {
+ f.isTerminal = checkIfTerminal(entry.Logger.Out)
+ }
+ // Get the max length of the level text
+ for _, level := range AllLevels {
+ levelTextLength := utf8.RuneCount([]byte(level.String()))
+ if levelTextLength > f.levelTextMaxLength {
+ f.levelTextMaxLength = levelTextLength
+ }
+ }
+}
+
+func (f *TextFormatter) isColored() bool {
+ isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
+
+ if f.EnvironmentOverrideColors {
+ switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); {
+ case ok && force != "0":
+ isColored = true
+ case ok && force == "0", os.Getenv("CLICOLOR") == "0":
+ isColored = false
+ }
+ }
+
+ return isColored && !f.DisableColors
+}
+
+// Format renders a single log entry
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields)
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+ keys := make([]string, 0, len(data))
+ for k := range data {
+ keys = append(keys, k)
+ }
+
+ var funcVal, fileVal string
+
+ fixedKeys := make([]string, 0, 4+len(data))
+ if !f.DisableTimestamp {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
+ }
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+ if entry.Message != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+ }
+ if entry.err != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+ }
+ if entry.HasCaller() {
+ if f.CallerPrettyfier != nil {
+ funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+ } else {
+ funcVal = entry.Caller.Function
+ fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+ }
+
+ if funcVal != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
+ }
+ if fileVal != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
+ }
+ }
+
+ if !f.DisableSorting {
+ if f.SortingFunc == nil {
+ sort.Strings(keys)
+ fixedKeys = append(fixedKeys, keys...)
+ } else {
+ if !f.isColored() {
+ fixedKeys = append(fixedKeys, keys...)
+ f.SortingFunc(fixedKeys)
+ } else {
+ f.SortingFunc(keys)
+ }
+ }
+ } else {
+ fixedKeys = append(fixedKeys, keys...)
+ }
+
+ var b *bytes.Buffer
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ f.terminalInitOnce.Do(func() { f.init(entry) })
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = defaultTimestampFormat
+ }
+ if f.isColored() {
+ f.printColored(b, entry, keys, data, timestampFormat)
+ } else {
+
+ for _, key := range fixedKeys {
+ var value interface{}
+ switch {
+ case key == f.FieldMap.resolve(FieldKeyTime):
+ value = entry.Time.Format(timestampFormat)
+ case key == f.FieldMap.resolve(FieldKeyLevel):
+ value = entry.Level.String()
+ case key == f.FieldMap.resolve(FieldKeyMsg):
+ value = entry.Message
+ case key == f.FieldMap.resolve(FieldKeyLogrusError):
+ value = entry.err
+ case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
+ value = funcVal
+ case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
+ value = fileVal
+ default:
+ value = data[key]
+ }
+ f.appendKeyValue(b, key, value)
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel, TraceLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ case InfoLevel:
+ levelColor = blue
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())
+ if !f.DisableLevelTruncation && !f.PadLevelText {
+ levelText = levelText[0:4]
+ }
+ if f.PadLevelText {
+ // Generates the format string used in the next line, for example "%-6s" or "%-7s".
+ // Based on the max level text length.
+ formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s"
+ // Formats the level text by appending spaces up to the max length, for example:
+ // - "INFO "
+ // - "WARNING"
+ levelText = fmt.Sprintf(formatString, levelText)
+ }
+
+ // Remove a single newline if it already exists in the message to keep
+ // the behavior of logrus text_formatter the same as the stdlib log package
+ entry.Message = strings.TrimSuffix(entry.Message, "\n")
+
+ caller := ""
+ if entry.HasCaller() {
+ funcVal := fmt.Sprintf("%s()", entry.Caller.Function)
+ fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+
+ if f.CallerPrettyfier != nil {
+ funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+ }
+
+ if fileVal == "" {
+ caller = funcVal
+ } else if funcVal == "" {
+ caller = fileVal
+ } else {
+ caller = fileVal + " " + funcVal
+ }
+ }
+
+ switch {
+ case f.DisableTimestamp:
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
+ case !f.FullTimestamp:
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
+ default:
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
+ }
+ for _, k := range keys {
+ v := data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+ f.appendValue(b, v)
+ }
+}
+
+func (f *TextFormatter) needsQuoting(text string) bool {
+ if f.ForceQuote {
+ return true
+ }
+ if f.QuoteEmptyFields && len(text) == 0 {
+ return true
+ }
+ if f.DisableQuote {
+ return false
+ }
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+ if b.Len() > 0 {
+ b.WriteByte(' ')
+ }
+ b.WriteString(key)
+ b.WriteByte('=')
+ f.appendValue(b, value)
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+ stringVal, ok := value.(string)
+ if !ok {
+ stringVal = fmt.Sprint(value)
+ }
+
+ if !f.needsQuoting(stringVal) {
+ b.WriteString(stringVal)
+ } else {
+ b.WriteString(fmt.Sprintf("%q", stringVal))
+ }
+}
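A configuration sketch for the options above; the layout string is the usual Go reference time, and the values are chosen only for illustration:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.SetFormatter(&log.TextFormatter{
		FullTimestamp:   true,                  // full timestamp instead of seconds since start
		TimestampFormat: "2006-01-02 15:04:05", // reference-time layout, as with time.Format
		PadLevelText:    true,                  // pad level text so columns line up
		ForceQuote:      true,                  // quote every value, even ones needsQuoting would skip
	})
	logger.WithField("path", "/tmp/some dir").Warn("spaces would force quoting anyway")
}
```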
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
new file mode 100644
index 0000000..074fd4b
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -0,0 +1,102 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+ "strings"
+)
+
+// Writer at INFO level. See WriterLevel for details.
+func (logger *Logger) Writer() *io.PipeWriter {
+ return logger.WriterLevel(InfoLevel)
+}
+
+// WriterLevel returns an io.Writer that can be used to write arbitrary text to
+// the logger at the given log level. Each line written to the writer will be
+// printed in the usual way using formatters and hooks. The writer is part of an
+// io.Pipe, and it is the caller's responsibility to close the writer when done.
+// This can be used to override the standard library logger easily.
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+ return NewEntry(logger).WriterLevel(level)
+}
+
+// Writer returns an io.Writer that writes to the logger at the info log level
+func (entry *Entry) Writer() *io.PipeWriter {
+ return entry.WriterLevel(InfoLevel)
+}
+
+// WriterLevel returns an io.Writer that writes to the logger at the given log level
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ var printFunc func(args ...interface{})
+
+ // Determine which log function to use based on the specified log level
+ switch level {
+ case TraceLevel:
+ printFunc = entry.Trace
+ case DebugLevel:
+ printFunc = entry.Debug
+ case InfoLevel:
+ printFunc = entry.Info
+ case WarnLevel:
+ printFunc = entry.Warn
+ case ErrorLevel:
+ printFunc = entry.Error
+ case FatalLevel:
+ printFunc = entry.Fatal
+ case PanicLevel:
+ printFunc = entry.Panic
+ default:
+ printFunc = entry.Print
+ }
+
+ // Start a new goroutine to scan the input and write it to the logger using the specified print function.
+ // It splits the input into chunks of up to 64KB to avoid buffer overflows.
+ go entry.writerScanner(reader, printFunc)
+
+ // Set a finalizer function to close the writer when it is garbage collected
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+// writerScanner scans the input from the reader and writes it to the logger
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+ scanner := bufio.NewScanner(reader)
+
+ // Set the buffer size to the maximum token size to avoid buffer overflows
+ scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)
+
+ // Define a split function to split the input into chunks of up to 64KB
+ chunkSize := bufio.MaxScanTokenSize // 64KB
+ splitFunc := func(data []byte, atEOF bool) (int, []byte, error) {
+ if len(data) >= chunkSize {
+ return chunkSize, data[:chunkSize], nil
+ }
+
+ return bufio.ScanLines(data, atEOF)
+ }
+
+ // Use the custom split function to split the input
+ scanner.Split(splitFunc)
+
+ // Scan the input and write it to the logger using the specified print function
+ for scanner.Scan() {
+ printFunc(strings.TrimRight(scanner.Text(), "\r\n"))
+ }
+
+ // If there was an error while scanning the input, log an error
+ if err := scanner.Err(); err != nil {
+ entry.Errorf("Error while reading from Writer: %s", err)
+ }
+
+ // Close the reader when we are done
+ reader.Close()
+}
+
+// writerFinalizer is a finalizer function that closes the given writer when it is garbage collected.
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
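As the comments note, this makes it easy to route the standard library logger through logrus. A small sketch using only the APIs defined above:

```go
package main

import (
	stdlog "log"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()

	// Each line written to w becomes one logrus entry at Warn level.
	w := logger.WriterLevel(log.WarnLevel)
	defer w.Close() // closing the pipe writer is the caller's responsibility

	stdlog.SetOutput(w)
	stdlog.SetFlags(0) // logrus adds its own timestamp

	stdlog.Println("legacy message") // appears as level=warning msg="legacy message"
}
```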
diff --git a/vendor/github.com/sourcegraph/conc/.golangci.yml b/vendor/github.com/sourcegraph/conc/.golangci.yml
new file mode 100644
index 0000000..ae65a76
--- /dev/null
+++ b/vendor/github.com/sourcegraph/conc/.golangci.yml
@@ -0,0 +1,11 @@
+linters:
+ disable-all: true
+ enable:
+ - errcheck
+ - godot
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - typecheck
+ - unused
diff --git a/vendor/github.com/sourcegraph/conc/LICENSE b/vendor/github.com/sourcegraph/conc/LICENSE
new file mode 100644
index 0000000..1081f4e
--- /dev/null
+++ b/vendor/github.com/sourcegraph/conc/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Sourcegraph
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/sourcegraph/conc/README.md b/vendor/github.com/sourcegraph/conc/README.md
new file mode 100644
index 0000000..1c87c3c
--- /dev/null
+++ b/vendor/github.com/sourcegraph/conc/README.md
@@ -0,0 +1,464 @@
+
+
+# `conc`: better structured concurrency for go
+
+[Go Reference](https://pkg.go.dev/github.com/sourcegraph/conc) ·
+[Sourcegraph](https://sourcegraph.com/github.com/sourcegraph/conc) ·
+[Go Report Card](https://goreportcard.com/report/github.com/sourcegraph/conc) ·
+[Codecov](https://codecov.io/gh/sourcegraph/conc) ·
+[Discord](https://discord.gg/bvXQXmtRjN)
+
+`conc` is your toolbelt for structured concurrency in go, making common tasks
+easier and safer.
+
+```sh
+go get github.com/sourcegraph/conc
+```
+
+# At a glance
+
+- Use [`conc.WaitGroup`](https://pkg.go.dev/github.com/sourcegraph/conc#WaitGroup) if you just want a safer version of `sync.WaitGroup`
+- Use [`pool.Pool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool) if you want a concurrency-limited task runner
+- Use [`pool.ResultPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultPool) if you want a concurrent task runner that collects task results
+- Use [`pool.(Result)?ErrorPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool) if your tasks are fallible
+- Use [`pool.(Result)?ContextPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ContextPool) if your tasks should be canceled on failure
+- Use [`stream.Stream`](https://pkg.go.dev/github.com/sourcegraph/conc/stream#Stream) if you want to process an ordered stream of tasks in parallel with serial callbacks
+- Use [`iter.Map`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#Map) if you want to concurrently map a slice
+- Use [`iter.ForEach`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#ForEach) if you want to concurrently iterate over a slice
+- Use [`panics.Catcher`](https://pkg.go.dev/github.com/sourcegraph/conc/panics#Catcher) if you want to catch panics in your own goroutines
+
+All pools are created with
+[`pool.New()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#New)
+or
+[`pool.NewWithResults[T]()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#NewWithResults),
+then configured with methods (see the combined sketch after this list):
+
+- [`p.WithMaxGoroutines()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.MaxGoroutines) configures the maximum number of goroutines in the pool
+- [`p.WithErrors()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithErrors) configures the pool to run tasks that return errors
+- [`p.WithContext(ctx)`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithContext) configures the pool to run tasks that should be canceled on first error
+- [`p.WithFirstError()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool.WithFirstError) configures error pools to only keep the first returned error rather than an aggregated error
+- [`p.WithCollectErrored()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultContextPool.WithCollectErrored) configures result pools to collect results even when the task errored
+
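A combined sketch of those builders; the failing-task logic is illustrative only:

```go
package main

import (
	"context"
	"fmt"

	"github.com/sourcegraph/conc/pool"
)

func main() {
	// At most 4 goroutines; tasks return errors and share a context
	// that is canceled on the first failure.
	p := pool.New().
		WithMaxGoroutines(4).
		WithContext(context.Background())

	for i := 0; i < 10; i++ {
		i := i
		p.Go(func(ctx context.Context) error {
			if i == 3 {
				return fmt.Errorf("task %d failed", i)
			}
			return nil
		})
	}

	if err := p.Wait(); err != nil {
		fmt.Println("first failure cancels the rest:", err)
	}
}
```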
+# Goals
+
+The main goals of the package are:
+1) Make it harder to leak goroutines
+2) Handle panics gracefully
+3) Make concurrent code easier to read
+
+## Goal #1: Make it harder to leak goroutines
+
+A common pain point when working with goroutines is cleaning them up. It's
+really easy to fire off a `go` statement and fail to properly wait for it to
+complete.
+
+`conc` takes the opinionated stance that all concurrency should be scoped.
+That is, goroutines should have an owner and that owner should always
+ensure that its owned goroutines exit properly.
+
+In `conc`, the owner of a goroutine is always a `conc.WaitGroup`. Goroutines
+are spawned in a `WaitGroup` with `(*WaitGroup).Go()`, and
+`(*WaitGroup).Wait()` should always be called before the `WaitGroup` goes out
+of scope.
+
+In some cases, you might want a spawned goroutine to outlast the scope of the
+caller. In that case, you could pass a `WaitGroup` into the spawning function.
+
+```go
+func main() {
+ var wg conc.WaitGroup
+ defer wg.Wait()
+
+ startTheThing(&wg)
+}
+
+func startTheThing(wg *conc.WaitGroup) {
+ wg.Go(func() { ... })
+}
+```
+
+For some more discussion on why scoped concurrency is nice, check out [this
+blog
+post](https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/).
+
+## Goal #2: Handle panics gracefully
+
+A frequent problem with goroutines in long-running applications is handling
+panics. A goroutine spawned without a panic handler will crash the whole process
+on panic. This is usually undesirable.
+
+However, if you do add a panic handler to a goroutine, what do you do with the
+panic once you catch it? Some options:
+1) Ignore it
+2) Log it
+3) Turn it into an error and return that to the goroutine spawner
+4) Propagate the panic to the goroutine spawner
+
+Ignoring panics is a bad idea since panics usually mean there is actually
+something wrong and someone should fix it.
+
+Just logging panics isn't great either because then there is no indication to the spawner
+that something bad happened, and it might just continue on as normal even though your
+program is in a really bad state.
+
+Both (3) and (4) are reasonable options, but both require the goroutine to have
+an owner that can actually receive the message that something went wrong. This
+is generally not true with a goroutine spawned with `go`, but in the `conc`
+package, all goroutines have an owner that must collect the spawned goroutine.
+In the conc package, any call to `Wait()` will panic if any of the spawned goroutines
+panicked. Additionally, it decorates the panic value with a stacktrace from the child
+goroutine so that you don't lose information about what caused the panic.
+
+Doing this all correctly every time you spawn something with `go` is not
+trivial and it requires a lot of boilerplate that makes the important parts of
+the code more difficult to read, so `conc` does this for you.
+
+
+
+```go
+func main() {
+ var wg conc.WaitGroup
+ wg.Go(doSomethingThatMightPanic)
+ // panics with a nice stacktrace
+ wg.Wait()
+}
+```
+
+
+
+
+## Goal #3: Make concurrent code easier to read
+
+Doing concurrency correctly is difficult. Doing it in a way that doesn't
+obfuscate what the code is actually doing is more difficult. The `conc` package
+attempts to make common operations easier by abstracting as much boilerplate
+complexity as possible.
+
+Want to run a set of concurrent tasks with a bounded set of goroutines? Use
+`pool.New()`. Want to process an ordered stream of results concurrently, but
+still maintain order? Try `stream.New()`. What about a concurrent map over
+a slice? Take a peek at `iter.Map()`.
+
+Browse some examples below for some comparisons with doing these by hand.
+
+# Examples
+
+Each of these examples forgoes propagating panics for simplicity. To see
+what kind of complexity that would add, check out the "Goal #2" header above.
+
+Spawning a set of goroutines and waiting for them to finish:
+
+**stdlib**:
+
+```go
+func main() {
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ // crashes on panic!
+ doSomething()
+ }()
+ }
+ wg.Wait()
+}
+```
+
+**conc**:
+
+```go
+func main() {
+ var wg conc.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Go(doSomething)
+ }
+ wg.Wait()
+}
+```
+
+
+
+
+Process each element of a stream in a static pool of goroutines:
+
+**stdlib**:
+
+```go
+func process(stream chan int) {
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for elem := range stream {
+ handle(elem)
+ }
+ }()
+ }
+ wg.Wait()
+}
+```
+
diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
+// partitionCmpFunc does one quicksort partition.
+// Let p = data[pivot].
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(cmp(data[a], data[i]) < 0) {
+ i++
+ }
+ for i <= j && (cmp(data[a], data[j]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(cmp(data[i], data[i-1]) < 0) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotCmpFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
+ j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
+ k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
+ }
+ // Find the median among i, j, k and stores it into j.
+ j = medianCmpFunc(data, i, j, k, &swaps, cmp)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
+ if cmp(data[b], data[a]) < 0 {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ b, c = order2CmpFunc(data, b, c, swaps, cmp)
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ return b
+}
+
+// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
+ return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
+}
+
+func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortCmpFunc(data, a, b, cmp)
+ a = b
+ b += blockSize
+ }
+ insertionSortCmpFunc(data, a, n, cmp)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeCmpFunc(data, a, a+blockSize, b, cmp)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeCmpFunc(data, a, m, n, cmp)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Without loss of generality, M < N.
+// The recursion depth is bounded by ceil(log(M+N)).
+// The algorithm needs O(M*log(N/M + 1)) comparisons.
+// The algorithm needs O((M+N)*log(M)) swaps.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmp(data[h], data[a]) < 0 {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(cmp(data[m], data[h]) < 0) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(cmp(data[p-c], data[c]) < 0) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateCmpFunc(data, start, m, end, cmp)
+ }
+ if a < start && start < mid {
+ symMergeCmpFunc(data, a, start, mid, cmp)
+ }
+ if mid < end && end < b {
+ symMergeCmpFunc(data, mid, end, b, cmp)
+ }
+}
+
+// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeCmpFunc(data, m-i, m, j, cmp)
+ i -= j
+ } else {
+ swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeCmpFunc(data, m-i, m, i, cmp)
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 0000000..99b47c3
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !cmpLess(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !cmpLess(data[a-1], data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
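+
+// As an illustrative note (a sketch, not a contract of this generated file):
+// the exported Sort in this package drives pdqsortOrdered over the whole
+// slice with a depth limit derived from the slice length, roughly:
+//
+//	n := len(x)
+//	pdqsortOrdered(x, 0, n, bits.Len(uint(n)))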
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && cmpLess(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !cmpLess(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && cmpLess(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !cmpLess(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !cmpLess(data[a], data[i]) {
+ i++
+ }
+ for i <= j && cmpLess(data[a], data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !cmpLess(data[i], data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !cmpLess(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !cmpLess(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps)
+ j = medianAdjacentOrdered(data, j, &swaps)
+ k = medianAdjacentOrdered(data, k, &swaps)
+ }
+ // Find the median among i, j, k and store it into j.
+ j = medianOrdered(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+ if cmpLess(data[b], data[a]) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+ a, b = order2Ordered(data, a, b, swaps)
+ b, c = order2Ordered(data, b, c, swaps)
+ a, b = order2Ordered(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacentOrdered returns the index of the median of data[a - 1], data[a], data[a + 1].
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+ return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortOrdered(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSortOrdered(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeOrdered(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeOrdered(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Wolog M < N.
+// The recursion depth is bounded by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmpLess(data[h], data[a]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !cmpLess(data[m], data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !cmpLess(data[p-c], data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateOrdered(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMergeOrdered(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMergeOrdered(data, mid, end, b)
+ }
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeOrdered(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRangeOrdered(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeOrdered(data, m-i, m, i)
+}
diff --git a/vendor/golang.org/x/exp/slog/attr.go b/vendor/golang.org/x/exp/slog/attr.go
new file mode 100644
index 0000000..a180d0e
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/attr.go
@@ -0,0 +1,102 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slog
+
+import (
+ "fmt"
+ "time"
+)
+
+// An Attr is a key-value pair.
+type Attr struct {
+ Key string
+ Value Value
+}
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+ return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64.
+func Int64(key string, value int64) Attr {
+ return Attr{key, Int64Value(value)}
+}
+
+// Int converts an int to an int64 and returns
+// an Attr with that value.
+func Int(key string, value int) Attr {
+ return Int64(key, int64(value))
+}
+
+// Uint64 returns an Attr for a uint64.
+func Uint64(key string, v uint64) Attr {
+ return Attr{key, Uint64Value(v)}
+}
+
+// Float64 returns an Attr for a floating-point number.
+func Float64(key string, v float64) Attr {
+ return Attr{key, Float64Value(v)}
+}
+
+// Bool returns an Attr for a bool.
+func Bool(key string, v bool) Attr {
+ return Attr{key, BoolValue(v)}
+}
+
+// Time returns an Attr for a time.Time.
+// It discards the monotonic portion.
+func Time(key string, v time.Time) Attr {
+ return Attr{key, TimeValue(v)}
+}
+
+// Duration returns an Attr for a time.Duration.
+func Duration(key string, v time.Duration) Attr {
+ return Attr{key, DurationValue(v)}
+}
+
+// Group returns an Attr for a Group Value.
+// The first argument is the key; the remaining arguments
+// are converted to Attrs as in [Logger.Log].
+//
+// Use Group to collect several key-value pairs under a single
+// key on a log line, or as the result of LogValue
+// in order to log a single value as multiple Attrs.
+func Group(key string, args ...any) Attr {
+ return Attr{key, GroupValue(argsToAttrSlice(args)...)}
+}
+
+func argsToAttrSlice(args []any) []Attr {
+ var (
+ attr Attr
+ attrs []Attr
+ )
+ for len(args) > 0 {
+ attr, args = argsToAttr(args)
+ attrs = append(attrs, attr)
+ }
+ return attrs
+}
+
+// Any returns an Attr for the supplied value.
+// See [Value.AnyValue] for how values are treated.
+func Any(key string, value any) Attr {
+ return Attr{key, AnyValue(value)}
+}
+
+// Equal reports whether a and b have equal keys and values.
+func (a Attr) Equal(b Attr) bool {
+ return a.Key == b.Key && a.Value.Equal(b.Value)
+}
+
+func (a Attr) String() string {
+ return fmt.Sprintf("%s=%s", a.Key, a.Value)
+}
+
+// isEmpty reports whether a has an empty key and a nil value.
+// That can be written as Attr{} or Any("", nil).
+func (a Attr) isEmpty() bool {
+ return a.Key == "" && a.Value.num == 0 && a.Value.any == nil
+}
diff --git a/vendor/golang.org/x/exp/slog/doc.go b/vendor/golang.org/x/exp/slog/doc.go
new file mode 100644
index 0000000..4beaf86
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/doc.go
@@ -0,0 +1,316 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package slog provides structured logging,
+in which log records include a message,
+a severity level, and various other attributes
+expressed as key-value pairs.
+
+It defines a type, [Logger],
+which provides several methods (such as [Logger.Info] and [Logger.Error])
+for reporting events of interest.
+
+Each Logger is associated with a [Handler].
+A Logger output method creates a [Record] from the method arguments
+and passes it to the Handler, which decides how to handle it.
+There is a default Logger accessible through top-level functions
+(such as [Info] and [Error]) that call the corresponding Logger methods.
+
+A log record consists of a time, a level, a message, and a set of key-value
+pairs, where the keys are strings and the values may be of any type.
+As an example,
+
+ slog.Info("hello", "count", 3)
+
+creates a record containing the time of the call,
+a level of Info, the message "hello", and a single
+pair with key "count" and value 3.
+
+The [Info] top-level function calls the [Logger.Info] method on the default Logger.
+In addition to [Logger.Info], there are methods for Debug, Warn and Error levels.
+Besides these convenience methods for common levels,
+there is also a [Logger.Log] method which takes the level as an argument.
+Each of these methods has a corresponding top-level function that uses the
+default logger.
+
+The default handler formats the log record's message, time, level, and attributes
+as a string and passes it to the [log] package.
+
+ 2022/11/08 15:28:26 INFO hello count=3
+
+For more control over the output format, create a logger with a different handler.
+This statement uses [New] to create a new logger with a TextHandler
+that writes structured records in text form to standard error:
+
+ logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
+
+[TextHandler] output is a sequence of key=value pairs, easily and unambiguously
+parsed by machine. This statement:
+
+ logger.Info("hello", "count", 3)
+
+produces this output:
+
+ time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3
+
+The package also provides [JSONHandler], whose output is line-delimited JSON:
+
+ logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
+ logger.Info("hello", "count", 3)
+
+produces this output:
+
+ {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3}
+
+Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions].
+There are options for setting the minimum level (see Levels, below),
+displaying the source file and line of the log call, and
+modifying attributes before they are logged.
+
+Setting a logger as the default with
+
+ slog.SetDefault(logger)
+
+will cause the top-level functions like [Info] to use it.
+[SetDefault] also updates the default logger used by the [log] package,
+so that existing applications that use [log.Printf] and related functions
+will send log records to the logger's handler without needing to be rewritten.
+
+Some attributes are common to many log calls.
+For example, you may wish to include the URL or trace identifier of a server request
+with all log events arising from the request.
+Rather than repeat the attribute with every log call, you can use [Logger.With]
+to construct a new Logger containing the attributes:
+
+ logger2 := logger.With("url", r.URL)
+
+The arguments to With are the same key-value pairs used in [Logger.Info].
+The result is a new Logger with the same handler as the original, but additional
+attributes that will appear in the output of every call.
+
+# Levels
+
+A [Level] is an integer representing the importance or severity of a log event.
+The higher the level, the more severe the event.
+This package defines constants for the most common levels,
+but any int can be used as a level.
+
+In an application, you may wish to log messages only at a certain level or greater.
+One common configuration is to log messages at Info or higher levels,
+suppressing debug logging until it is needed.
+The built-in handlers can be configured with the minimum level to output by
+setting [HandlerOptions.Level].
+The program's main function typically does this.
+The default value is LevelInfo.
+
+Setting the [HandlerOptions.Level] field to a [Level] value
+fixes the handler's minimum level throughout its lifetime.
+Setting it to a [LevelVar] allows the level to be varied dynamically.
+A LevelVar holds a Level and is safe to read or write from multiple
+goroutines.
+To vary the level dynamically for an entire program, first initialize
+a global LevelVar:
+
+ var programLevel = new(slog.LevelVar) // Info by default
+
+Then use the LevelVar to construct a handler, and make it the default:
+
+ h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel})
+ slog.SetDefault(slog.New(h))
+
+Now the program can change its logging level with a single statement:
+
+ programLevel.Set(slog.LevelDebug)
+
+# Groups
+
+Attributes can be collected into groups.
+A group has a name that is used to qualify the names of its attributes.
+How this qualification is displayed depends on the handler.
+[TextHandler] separates the group and attribute names with a dot.
+[JSONHandler] treats each group as a separate JSON object, with the group name as the key.
+
+Use [Group] to create a Group attribute from a name and a list of key-value pairs:
+
+ slog.Group("request",
+ "method", r.Method,
+ "url", r.URL)
+
+TextHandler would display this group as
+
+ request.method=GET request.url=http://example.com
+
+JSONHandler would display it as
+
+ "request":{"method":"GET","url":"http://example.com"}
+
+Use [Logger.WithGroup] to qualify all of a Logger's output
+with a group name. Calling WithGroup on a Logger results in a
+new Logger with the same Handler as the original, but with all
+its attributes qualified by the group name.
+
+This can help prevent duplicate attribute keys in large systems,
+where subsystems might use the same keys.
+Pass each subsystem a different Logger with its own group name so that
+potential duplicates are qualified:
+
+ logger := slog.Default().With("id", systemID)
+ parserLogger := logger.WithGroup("parser")
+ parseInput(input, parserLogger)
+
+When parseInput logs with parserLogger, its keys will be qualified with "parser",
+so even if it uses the common key "id", the log line will have distinct keys.
+
+# Contexts
+
+Some handlers may wish to include information from the [context.Context] that is
+available at the call site. One example of such information
+is the identifier for the current span when tracing is enabled.
+
+The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first
+argument, as do their corresponding top-level functions.
+
+Although the convenience methods on Logger (Info and so on) and the
+corresponding top-level functions do not take a context, the alternatives ending
+in "Context" do. For example,
+
+ slog.InfoContext(ctx, "message")
+
+It is recommended to pass a context to an output method if one is available.
+
+# Attrs and Values
+
+An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as
+alternating keys and values. The statement
+
+ slog.Info("hello", slog.Int("count", 3))
+
+behaves the same as
+
+ slog.Info("hello", "count", 3)
+
+There are convenience constructors for [Attr] such as [Int], [String], and [Bool]
+for common types, as well as the function [Any] for constructing Attrs of any
+type.
+
+The value part of an Attr is a type called [Value].
+Like an [any], a Value can hold any Go value,
+but it can represent typical values, including all numbers and strings,
+without an allocation.
+
+For the most efficient log output, use [Logger.LogAttrs].
+It is similar to [Logger.Log] but accepts only Attrs, not alternating
+keys and values; this allows it, too, to avoid allocation.
+
+The call
+
+ logger.LogAttrs(nil, slog.LevelInfo, "hello", slog.Int("count", 3))
+
+is the most efficient way to achieve the same output as
+
+ slog.Info("hello", "count", 3)
+
+# Customizing a type's logging behavior
+
+If a type implements the [LogValuer] interface, the [Value] returned from its LogValue
+method is used for logging. You can use this to control how values of the type
+appear in logs. For example, you can redact secret information like passwords,
+or gather a struct's fields in a Group. See the examples under [LogValuer] for
+details.
+
+A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve]
+method handles these cases carefully, avoiding infinite loops and unbounded recursion.
+Handler authors and others may wish to use Value.Resolve instead of calling LogValue directly.
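+
+For example, a minimal sketch of redacting a secret (the Token type and the
+replacement string are illustrative):
+
+	type Token string
+
+	// LogValue hides the token from all handlers.
+	func (Token) LogValue() slog.Value {
+		return slog.StringValue("REDACTED_TOKEN")
+	}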
+
+# Wrapping output methods
+
+The logger functions use reflection over the call stack to find the file name
+and line number of the logging call within the application. This can produce
+incorrect source information for functions that wrap slog. For instance, if you
+define this function in file mylog.go:
+
+ func Infof(format string, args ...any) {
+ slog.Default().Info(fmt.Sprintf(format, args...))
+ }
+
+and you call it like this in main.go:
+
+ Infof("hello, %s", "world")
+
+then slog will report the source file as mylog.go, not main.go.
+
+A correct implementation of Infof will obtain the source location
+(pc) and pass it to NewRecord.
+The Infof function in the package-level example called "wrapping"
+demonstrates how to do this.
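+
+A minimal sketch of such a wrapper, matching the Infof above (assuming
+imports of context, fmt, runtime and time):
+
+	func Infof(format string, args ...any) {
+		var pcs [1]uintptr
+		runtime.Callers(2, pcs[:]) // skip [Callers, Infof]
+		r := slog.NewRecord(time.Now(), slog.LevelInfo, fmt.Sprintf(format, args...), pcs[0])
+		_ = slog.Default().Handler().Handle(context.Background(), r)
+	}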
+
+# Working with Records
+
+Sometimes a Handler will need to modify a Record
+before passing it on to another Handler or backend.
+A Record contains a mixture of simple public fields (e.g. Time, Level, Message)
+and hidden fields that refer to state (such as attributes) indirectly. This
+means that modifying a simple copy of a Record (e.g. by calling
+[Record.Add] or [Record.AddAttrs] to add attributes)
+may have unexpected effects on the original.
+Before modifying a Record, use [Clone] to
+create a copy that shares no state with the original,
+or create a new Record with [NewRecord]
+and build up its Attrs by traversing the old ones with [Record.Attrs].
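+
+A minimal sketch (withCount is a hypothetical helper):
+
+	func withCount(r slog.Record) slog.Record {
+		r2 := r.Clone()
+		r2.AddAttrs(slog.Int("count", 1))
+		return r2 // the caller's Record is unaffected
+	}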
+
+# Performance considerations
+
+If profiling your application demonstrates that logging is taking significant time,
+the following suggestions may help.
+
+If many log lines have a common attribute, use [Logger.With] to create a Logger with
+that attribute. The built-in handlers will format that attribute only once, at the
+call to [Logger.With]. The [Handler] interface is designed to allow that optimization,
+and a well-written Handler should take advantage of it.
+
+The arguments to a log call are always evaluated, even if the log event is discarded.
+If possible, defer computation so that it happens only if the value is actually logged.
+For example, consider the call
+
+ slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily
+
+The URL.String method will be called even if the logger discards Info-level events.
+Instead, pass the URL directly:
+
+ slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed
+
+The built-in [TextHandler] will call its String method, but only
+if the log event is enabled.
+Avoiding the call to String also preserves the structure of the underlying value.
+For example [JSONHandler] emits the components of the parsed URL as a JSON object.
+If you want to avoid eagerly paying the cost of the String call
+without causing the handler to potentially inspect the structure of the value,
+wrap the value in a fmt.Stringer implementation that hides its Marshal methods.
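+
+A minimal sketch of such a wrapper (the lazyURL name is illustrative):
+
+	type lazyURL struct{ u *url.URL }
+
+	// String formats the URL only when a handler actually needs it;
+	// the wrapper also hides the URL's marshaling methods.
+	func (w lazyURL) String() string { return w.u.String() }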
+
+You can also use the [LogValuer] interface to avoid unnecessary work in disabled log
+calls. Say you need to log some expensive value:
+
+ slog.Debug("frobbing", "value", computeExpensiveValue(arg))
+
+Even if this line is disabled, computeExpensiveValue will be called.
+To avoid that, define a type implementing LogValuer:
+
+ type expensive struct { arg int }
+
+ func (e expensive) LogValue() slog.Value {
+ return slog.AnyValue(computeExpensiveValue(e.arg))
+ }
+
+Then use a value of that type in log calls:
+
+ slog.Debug("frobbing", "value", expensive{arg})
+
+Now computeExpensiveValue will only be called when the line is enabled.
+
+The built-in handlers acquire a lock before calling [io.Writer.Write]
+to ensure that each record is written in one piece. User-defined
+handlers are responsible for their own locking.
+*/
+package slog
diff --git a/vendor/golang.org/x/exp/slog/handler.go b/vendor/golang.org/x/exp/slog/handler.go
new file mode 100644
index 0000000..74f8873
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/handler.go
@@ -0,0 +1,559 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slog
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "strconv"
+ "sync"
+ "time"
+
+ "golang.org/x/exp/slices"
+ "golang.org/x/exp/slog/internal/buffer"
+)
+
+// A Handler handles log records produced by a Logger.
+//
+// A typical handler may print log records to standard error,
+// or write them to a file or database, or perhaps augment them
+// with additional attributes and pass them on to another handler.
+//
+// Any of the Handler's methods may be called concurrently with itself
+// or with other methods. It is the responsibility of the Handler to
+// manage this concurrency.
+//
+// Users of the slog package should not invoke Handler methods directly.
+// They should use the methods of [Logger] instead.
+type Handler interface {
+ // Enabled reports whether the handler handles records at the given level.
+ // The handler ignores records whose level is lower.
+ // It is called early, before any arguments are processed,
+ // to save effort if the log event should be discarded.
+ // If called from a Logger method, the first argument is the context
+ // passed to that method, or context.Background() if nil was passed
+ // or the method does not take a context.
+ // The context is passed so Enabled can use its values
+ // to make a decision.
+ Enabled(context.Context, Level) bool
+
+ // Handle handles the Record.
+ // It will only be called when Enabled returns true.
+ // The Context argument is as for Enabled.
+ // It is present solely to provide Handlers access to the context's values.
+ // Canceling the context should not affect record processing.
+ // (Among other things, log messages may be necessary to debug a
+ // cancellation-related problem.)
+ //
+ // Handle methods that produce output should observe the following rules:
+ // - If r.Time is the zero time, ignore the time.
+ // - If r.PC is zero, ignore it.
+ // - Attr's values should be resolved.
+ // - If an Attr's key and value are both the zero value, ignore the Attr.
+ // This can be tested with attr.Equal(Attr{}).
+ // - If a group's key is empty, inline the group's Attrs.
+ // - If a group has no Attrs (even if it has a non-empty key),
+ // ignore it.
+ Handle(context.Context, Record) error
+
+ // WithAttrs returns a new Handler whose attributes consist of
+ // both the receiver's attributes and the arguments.
+ // The Handler owns the slice: it may retain, modify or discard it.
+ WithAttrs(attrs []Attr) Handler
+
+ // WithGroup returns a new Handler with the given group appended to
+ // the receiver's existing groups.
+ // The keys of all subsequent attributes, whether added by With or in a
+ // Record, should be qualified by the sequence of group names.
+ //
+ // How this qualification happens is up to the Handler, so long as
+ // this Handler's attribute keys differ from those of another Handler
+ // with a different sequence of group names.
+ //
+ // A Handler should treat WithGroup as starting a Group of Attrs that ends
+ // at the end of the log event. That is,
+ //
+ // logger.WithGroup("s").LogAttrs(level, msg, slog.Int("a", 1), slog.Int("b", 2))
+ //
+ // should behave like
+ //
+ // logger.LogAttrs(level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2)))
+ //
+ // If the name is empty, WithGroup returns the receiver.
+ WithGroup(name string) Handler
+}
+
+type defaultHandler struct {
+ ch *commonHandler
+ // log.Output, except for testing
+ output func(calldepth int, message string) error
+}
+
+func newDefaultHandler(output func(int, string) error) *defaultHandler {
+ return &defaultHandler{
+ ch: &commonHandler{json: false},
+ output: output,
+ }
+}
+
+func (*defaultHandler) Enabled(_ context.Context, l Level) bool {
+ return l >= LevelInfo
+}
+
+// Collect the level, attributes and message in a string and
+// write it with the default log.Logger.
+// Let the log.Logger handle time and file/line.
+func (h *defaultHandler) Handle(ctx context.Context, r Record) error {
+ buf := buffer.New()
+ buf.WriteString(r.Level.String())
+ buf.WriteByte(' ')
+ buf.WriteString(r.Message)
+ state := h.ch.newHandleState(buf, true, " ", nil)
+ defer state.free()
+ state.appendNonBuiltIns(r)
+
+ // skip [h.output, defaultHandler.Handle, handlerWriter.Write, log.Output]
+ return h.output(4, buf.String())
+}
+
+func (h *defaultHandler) WithAttrs(as []Attr) Handler {
+ return &defaultHandler{h.ch.withAttrs(as), h.output}
+}
+
+func (h *defaultHandler) WithGroup(name string) Handler {
+ return &defaultHandler{h.ch.withGroup(name), h.output}
+}
+
+// HandlerOptions are options for a TextHandler or JSONHandler.
+// A zero HandlerOptions consists entirely of default values.
+type HandlerOptions struct {
+ // AddSource causes the handler to compute the source code position
+ // of the log statement and add a SourceKey attribute to the output.
+ AddSource bool
+
+ // Level reports the minimum record level that will be logged.
+ // The handler discards records with lower levels.
+ // If Level is nil, the handler assumes LevelInfo.
+ // The handler calls Level.Level for each record processed;
+ // to adjust the minimum level dynamically, use a LevelVar.
+ Level Leveler
+
+ // ReplaceAttr is called to rewrite each non-group attribute before it is logged.
+ // The attribute's value has been resolved (see [Value.Resolve]).
+ // If ReplaceAttr returns an Attr with Key == "", the attribute is discarded.
+ //
+ // The built-in attributes with keys "time", "level", "source", and "msg"
+ // are passed to this function, except that time is omitted
+ // if zero, and source is omitted if AddSource is false.
+ //
+ // The first argument is a list of currently open groups that contain the
+ // Attr. It must not be retained or modified. ReplaceAttr is never called
+ // for Group attributes, only their contents. For example, the attribute
+ // list
+ //
+ // Int("a", 1), Group("g", Int("b", 2)), Int("c", 3)
+ //
+ // results in consecutive calls to ReplaceAttr with the following arguments:
+ //
+ // nil, Int("a", 1)
+ // []string{"g"}, Int("b", 2)
+ // nil, Int("c", 3)
+ //
+ // ReplaceAttr can be used to change the default keys of the built-in
+ // attributes, convert types (for example, to replace a `time.Time` with the
+ // integer seconds since the Unix epoch), sanitize personal information, or
+ // remove attributes from the output.
+ ReplaceAttr func(groups []string, a Attr) Attr
+}
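+
+// As an illustrative sketch (not part of the API), a ReplaceAttr function
+// that removes the built-in time attribute could be written as:
+//
+//	opts := &HandlerOptions{
+//		ReplaceAttr: func(groups []string, a Attr) Attr {
+//			if len(groups) == 0 && a.Key == TimeKey {
+//				return Attr{} // empty key: the attribute is discarded
+//			}
+//			return a
+//		},
+//	}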
+
+// Keys for "built-in" attributes.
+const (
+ // TimeKey is the key used by the built-in handlers for the time
+ // when the log method is called. The associated Value is a [time.Time].
+ TimeKey = "time"
+ // LevelKey is the key used by the built-in handlers for the level
+ // of the log call. The associated value is a [Level].
+ LevelKey = "level"
+ // MessageKey is the key used by the built-in handlers for the
+ // message of the log call. The associated value is a string.
+ MessageKey = "msg"
+ // SourceKey is the key used by the built-in handlers for the source file
+ // and line of the log call. The associated value is a string.
+ SourceKey = "source"
+)
+
+type commonHandler struct {
+ json bool // true => output JSON; false => output text
+ opts HandlerOptions
+ preformattedAttrs []byte
+ groupPrefix string // for text: prefix of groups opened in preformatting
+ groups []string // all groups started from WithGroup
+ nOpenGroups int // the number of groups opened in preformattedAttrs
+ mu sync.Mutex
+ w io.Writer
+}
+
+func (h *commonHandler) clone() *commonHandler {
+ // We can't use assignment because we can't copy the mutex.
+ return &commonHandler{
+ json: h.json,
+ opts: h.opts,
+ preformattedAttrs: slices.Clip(h.preformattedAttrs),
+ groupPrefix: h.groupPrefix,
+ groups: slices.Clip(h.groups),
+ nOpenGroups: h.nOpenGroups,
+ w: h.w,
+ }
+}
+
+// enabled reports whether l is greater than or equal to the
+// minimum level.
+func (h *commonHandler) enabled(l Level) bool {
+ minLevel := LevelInfo
+ if h.opts.Level != nil {
+ minLevel = h.opts.Level.Level()
+ }
+ return l >= minLevel
+}
+
+func (h *commonHandler) withAttrs(as []Attr) *commonHandler {
+ h2 := h.clone()
+ // Pre-format the attributes as an optimization.
+ prefix := buffer.New()
+ defer prefix.Free()
+ prefix.WriteString(h.groupPrefix)
+ state := h2.newHandleState((*buffer.Buffer)(&h2.preformattedAttrs), false, "", prefix)
+ defer state.free()
+ if len(h2.preformattedAttrs) > 0 {
+ state.sep = h.attrSep()
+ }
+ state.openGroups()
+ for _, a := range as {
+ state.appendAttr(a)
+ }
+ // Remember the new prefix for later keys.
+ h2.groupPrefix = state.prefix.String()
+ // Remember how many opened groups are in preformattedAttrs,
+ // so we don't open them again when we handle a Record.
+ h2.nOpenGroups = len(h2.groups)
+ return h2
+}
+
+func (h *commonHandler) withGroup(name string) *commonHandler {
+ if name == "" {
+ return h
+ }
+ h2 := h.clone()
+ h2.groups = append(h2.groups, name)
+ return h2
+}
+
+func (h *commonHandler) handle(r Record) error {
+ state := h.newHandleState(buffer.New(), true, "", nil)
+ defer state.free()
+ if h.json {
+ state.buf.WriteByte('{')
+ }
+ // Built-in attributes. They are not in a group.
+ stateGroups := state.groups
+ state.groups = nil // So ReplaceAttr sees no groups instead of the preformatted groups.
+ rep := h.opts.ReplaceAttr
+ // time
+ if !r.Time.IsZero() {
+ key := TimeKey
+ val := r.Time.Round(0) // strip monotonic to match Attr behavior
+ if rep == nil {
+ state.appendKey(key)
+ state.appendTime(val)
+ } else {
+ state.appendAttr(Time(key, val))
+ }
+ }
+ // level
+ key := LevelKey
+ val := r.Level
+ if rep == nil {
+ state.appendKey(key)
+ state.appendString(val.String())
+ } else {
+ state.appendAttr(Any(key, val))
+ }
+ // source
+ if h.opts.AddSource {
+ state.appendAttr(Any(SourceKey, r.source()))
+ }
+ key = MessageKey
+ msg := r.Message
+ if rep == nil {
+ state.appendKey(key)
+ state.appendString(msg)
+ } else {
+ state.appendAttr(String(key, msg))
+ }
+ state.groups = stateGroups // Restore groups passed to ReplaceAttr.
+ state.appendNonBuiltIns(r)
+ state.buf.WriteByte('\n')
+
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ _, err := h.w.Write(*state.buf)
+ return err
+}
+
+func (s *handleState) appendNonBuiltIns(r Record) {
+ // preformatted Attrs
+ if len(s.h.preformattedAttrs) > 0 {
+ s.buf.WriteString(s.sep)
+ s.buf.Write(s.h.preformattedAttrs)
+ s.sep = s.h.attrSep()
+ }
+ // Attrs in Record -- unlike the built-in ones, they are in groups started
+ // from WithGroup.
+ s.prefix = buffer.New()
+ defer s.prefix.Free()
+ s.prefix.WriteString(s.h.groupPrefix)
+ s.openGroups()
+ r.Attrs(func(a Attr) bool {
+ s.appendAttr(a)
+ return true
+ })
+ if s.h.json {
+ // Close all open groups.
+ for range s.h.groups {
+ s.buf.WriteByte('}')
+ }
+ // Close the top-level object.
+ s.buf.WriteByte('}')
+ }
+}
+
+// attrSep returns the separator between attributes.
+func (h *commonHandler) attrSep() string {
+ if h.json {
+ return ","
+ }
+ return " "
+}
+
+// handleState holds state for a single call to commonHandler.handle.
+// The initial value of sep determines whether to emit a separator
+// before the next key, after which it stays true.
+type handleState struct {
+ h *commonHandler
+ buf *buffer.Buffer
+ freeBuf bool // should buf be freed?
+ sep string // separator to write before next key
+ prefix *buffer.Buffer // for text: key prefix
+ groups *[]string // pool-allocated slice of active groups, for ReplaceAttr
+}
+
+var groupPool = sync.Pool{New: func() any {
+ s := make([]string, 0, 10)
+ return &s
+}}
+
+func (h *commonHandler) newHandleState(buf *buffer.Buffer, freeBuf bool, sep string, prefix *buffer.Buffer) handleState {
+ s := handleState{
+ h: h,
+ buf: buf,
+ freeBuf: freeBuf,
+ sep: sep,
+ prefix: prefix,
+ }
+ if h.opts.ReplaceAttr != nil {
+ s.groups = groupPool.Get().(*[]string)
+ *s.groups = append(*s.groups, h.groups[:h.nOpenGroups]...)
+ }
+ return s
+}
+
+func (s *handleState) free() {
+ if s.freeBuf {
+ s.buf.Free()
+ }
+ if gs := s.groups; gs != nil {
+ *gs = (*gs)[:0]
+ groupPool.Put(gs)
+ }
+}
+
+func (s *handleState) openGroups() {
+ for _, n := range s.h.groups[s.h.nOpenGroups:] {
+ s.openGroup(n)
+ }
+}
+
+// Separator for group names and keys.
+const keyComponentSep = '.'
+
+// openGroup starts a new group of attributes
+// with the given name.
+func (s *handleState) openGroup(name string) {
+ if s.h.json {
+ s.appendKey(name)
+ s.buf.WriteByte('{')
+ s.sep = ""
+ } else {
+ s.prefix.WriteString(name)
+ s.prefix.WriteByte(keyComponentSep)
+ }
+ // Collect group names for ReplaceAttr.
+ if s.groups != nil {
+ *s.groups = append(*s.groups, name)
+ }
+}
+
+// closeGroup ends the group with the given name.
+func (s *handleState) closeGroup(name string) {
+ if s.h.json {
+ s.buf.WriteByte('}')
+ } else {
+ (*s.prefix) = (*s.prefix)[:len(*s.prefix)-len(name)-1 /* for keyComponentSep */]
+ }
+ s.sep = s.h.attrSep()
+ if s.groups != nil {
+ *s.groups = (*s.groups)[:len(*s.groups)-1]
+ }
+}
+
+// appendAttr appends the Attr's key and value to the buffer.
+// It handles replacement and checks for an empty key
+// (after replacement).
+func (s *handleState) appendAttr(a Attr) {
+ if rep := s.h.opts.ReplaceAttr; rep != nil && a.Value.Kind() != KindGroup {
+ var gs []string
+ if s.groups != nil {
+ gs = *s.groups
+ }
+ // Resolve before calling ReplaceAttr, so the user doesn't have to.
+ a.Value = a.Value.Resolve()
+ a = rep(gs, a)
+ }
+ a.Value = a.Value.Resolve()
+ // Elide empty Attrs.
+ if a.isEmpty() {
+ return
+ }
+ // Special case: Source.
+ if v := a.Value; v.Kind() == KindAny {
+ if src, ok := v.Any().(*Source); ok {
+ if s.h.json {
+ a.Value = src.group()
+ } else {
+ a.Value = StringValue(fmt.Sprintf("%s:%d", src.File, src.Line))
+ }
+ }
+ }
+ if a.Value.Kind() == KindGroup {
+ attrs := a.Value.Group()
+ // Output only non-empty groups.
+ if len(attrs) > 0 {
+ // Inline a group with an empty key.
+ if a.Key != "" {
+ s.openGroup(a.Key)
+ }
+ for _, aa := range attrs {
+ s.appendAttr(aa)
+ }
+ if a.Key != "" {
+ s.closeGroup(a.Key)
+ }
+ }
+ } else {
+ s.appendKey(a.Key)
+ s.appendValue(a.Value)
+ }
+}
+
+func (s *handleState) appendError(err error) {
+ s.appendString(fmt.Sprintf("!ERROR:%v", err))
+}
+
+func (s *handleState) appendKey(key string) {
+ s.buf.WriteString(s.sep)
+ if s.prefix != nil {
+ // TODO: optimize by avoiding allocation.
+ s.appendString(string(*s.prefix) + key)
+ } else {
+ s.appendString(key)
+ }
+ if s.h.json {
+ s.buf.WriteByte(':')
+ } else {
+ s.buf.WriteByte('=')
+ }
+ s.sep = s.h.attrSep()
+}
+
+func (s *handleState) appendString(str string) {
+ if s.h.json {
+ s.buf.WriteByte('"')
+ *s.buf = appendEscapedJSONString(*s.buf, str)
+ s.buf.WriteByte('"')
+ } else {
+ // text
+ if needsQuoting(str) {
+ *s.buf = strconv.AppendQuote(*s.buf, str)
+ } else {
+ s.buf.WriteString(str)
+ }
+ }
+}
+
+func (s *handleState) appendValue(v Value) {
+ var err error
+ if s.h.json {
+ err = appendJSONValue(s, v)
+ } else {
+ err = appendTextValue(s, v)
+ }
+ if err != nil {
+ s.appendError(err)
+ }
+}
+
+func (s *handleState) appendTime(t time.Time) {
+ if s.h.json {
+ appendJSONTime(s, t)
+ } else {
+ writeTimeRFC3339Millis(s.buf, t)
+ }
+}
+
+// This takes half the time of Time.AppendFormat.
+func writeTimeRFC3339Millis(buf *buffer.Buffer, t time.Time) {
+ year, month, day := t.Date()
+ buf.WritePosIntWidth(year, 4)
+ buf.WriteByte('-')
+ buf.WritePosIntWidth(int(month), 2)
+ buf.WriteByte('-')
+ buf.WritePosIntWidth(day, 2)
+ buf.WriteByte('T')
+ hour, min, sec := t.Clock()
+ buf.WritePosIntWidth(hour, 2)
+ buf.WriteByte(':')
+ buf.WritePosIntWidth(min, 2)
+ buf.WriteByte(':')
+ buf.WritePosIntWidth(sec, 2)
+ ns := t.Nanosecond()
+ buf.WriteByte('.')
+ buf.WritePosIntWidth(ns/1e6, 3)
+ _, offsetSeconds := t.Zone()
+ if offsetSeconds == 0 {
+ buf.WriteByte('Z')
+ } else {
+ offsetMinutes := offsetSeconds / 60
+ if offsetMinutes < 0 {
+ buf.WriteByte('-')
+ offsetMinutes = -offsetMinutes
+ } else {
+ buf.WriteByte('+')
+ }
+ buf.WritePosIntWidth(offsetMinutes/60, 2)
+ buf.WriteByte(':')
+ buf.WritePosIntWidth(offsetMinutes%60, 2)
+ }
+}
diff --git a/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go b/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go
new file mode 100644
index 0000000..7786c16
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go
@@ -0,0 +1,84 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package buffer provides a pool-allocated byte buffer.
+package buffer
+
+import (
+ "sync"
+)
+
+// Buffer adapted from go/src/fmt/print.go
+type Buffer []byte
+
+// Having an initial size gives a dramatic speedup.
+var bufPool = sync.Pool{
+ New: func() any {
+ b := make([]byte, 0, 1024)
+ return (*Buffer)(&b)
+ },
+}
+
+func New() *Buffer {
+ return bufPool.Get().(*Buffer)
+}
+
+func (b *Buffer) Free() {
+ // To reduce peak allocation, return only smaller buffers to the pool.
+ const maxBufferSize = 16 << 10
+ if cap(*b) <= maxBufferSize {
+ *b = (*b)[:0]
+ bufPool.Put(b)
+ }
+}
+
+func (b *Buffer) Reset() {
+ *b = (*b)[:0]
+}
+
+func (b *Buffer) Write(p []byte) (int, error) {
+ *b = append(*b, p...)
+ return len(p), nil
+}
+
+func (b *Buffer) WriteString(s string) {
+ *b = append(*b, s...)
+}
+
+func (b *Buffer) WriteByte(c byte) {
+ *b = append(*b, c)
+}
+
+func (b *Buffer) WritePosInt(i int) {
+ b.WritePosIntWidth(i, 0)
+}
+
+// WritePosIntWidth writes non-negative integer i to the buffer, padded on the left
+// by zeroes to the given width. Use a width of 0 to omit padding.
+func (b *Buffer) WritePosIntWidth(i, width int) {
+ // Cheap integer to fixed-width decimal ASCII.
+ // Copied from log/log.go.
+
+ if i < 0 {
+ panic("negative int")
+ }
+
+ // Assemble decimal in reverse order.
+ var bb [20]byte
+ bp := len(bb) - 1
+ for i >= 10 || width > 1 {
+ width--
+ q := i / 10
+ bb[bp] = byte('0' + i - q*10)
+ bp--
+ i = q
+ }
+ // i < 10
+ bb[bp] = byte('0' + i)
+ b.Write(bb[bp:])
+}
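+
+// For example (illustrative): WritePosIntWidth(7, 3) appends "007", and
+// WritePosIntWidth(2023, 0) appends "2023".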
+
+func (b *Buffer) String() string {
+ return string(*b)
+}
diff --git a/vendor/golang.org/x/exp/slog/internal/ignorepc.go b/vendor/golang.org/x/exp/slog/internal/ignorepc.go
new file mode 100644
index 0000000..d125642
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/internal/ignorepc.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+// If IgnorePC is true, do not invoke runtime.Callers to get the pc.
+// This is solely for benchmarking the slowdown from runtime.Callers.
+var IgnorePC = false
diff --git a/vendor/golang.org/x/exp/slog/json_handler.go b/vendor/golang.org/x/exp/slog/json_handler.go
new file mode 100644
index 0000000..157ada8
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/json_handler.go
@@ -0,0 +1,336 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slog
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+ "unicode/utf8"
+
+ "golang.org/x/exp/slog/internal/buffer"
+)
+
+// JSONHandler is a Handler that writes Records to an io.Writer as
+// line-delimited JSON objects.
+type JSONHandler struct {
+ *commonHandler
+}
+
+// NewJSONHandler creates a JSONHandler that writes to w,
+// using the given options.
+// If opts is nil, the default options are used.
+func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler {
+ if opts == nil {
+ opts = &HandlerOptions{}
+ }
+ return &JSONHandler{
+ &commonHandler{
+ json: true,
+ w: w,
+ opts: *opts,
+ },
+ }
+}
+
+// Enabled reports whether the handler handles records at the given level.
+// The handler ignores records whose level is lower.
+func (h *JSONHandler) Enabled(_ context.Context, level Level) bool {
+ return h.commonHandler.enabled(level)
+}
+
+// WithAttrs returns a new JSONHandler whose attributes consists
+// of h's attributes followed by attrs.
+func (h *JSONHandler) WithAttrs(attrs []Attr) Handler {
+ return &JSONHandler{commonHandler: h.commonHandler.withAttrs(attrs)}
+}
+
+func (h *JSONHandler) WithGroup(name string) Handler {
+ return &JSONHandler{commonHandler: h.commonHandler.withGroup(name)}
+}
+
+// Handle formats its argument Record as a JSON object on a single line.
+//
+// If the Record's time is zero, the time is omitted.
+// Otherwise, the key is "time"
+// and the value is output as with json.Marshal.
+//
+// If the Record's level is zero, the level is omitted.
+// Otherwise, the key is "level"
+// and the value of [Level.String] is output.
+//
+// If the AddSource option is set and source information is available,
+// the key is "source"
+// and the value is output as "FILE:LINE".
+//
+// The message's key is "msg".
+//
+// To modify these or other attributes, or remove them from the output, use
+// [HandlerOptions.ReplaceAttr].
+//
+// Values are formatted as with an [encoding/json.Encoder] with SetEscapeHTML(false),
+// with two exceptions.
+//
+// First, an Attr whose Value is of type error is formatted as a string, by
+// calling its Error method. Only errors in Attrs receive this special treatment,
+// not errors embedded in structs, slices, maps or other data structures that
+// are processed by the encoding/json package.
+//
+// Second, an encoding failure does not cause Handle to return an error.
+// Instead, the error message is formatted as a string.
+//
+// Each call to Handle results in a single serialized call to io.Writer.Write.
+func (h *JSONHandler) Handle(_ context.Context, r Record) error {
+ return h.commonHandler.handle(r)
+}
+
+// Adapted from time.Time.MarshalJSON to avoid allocation.
+func appendJSONTime(s *handleState, t time.Time) {
+ if y := t.Year(); y < 0 || y >= 10000 {
+ // RFC 3339 is clear that years are 4 digits exactly.
+ // See golang.org/issue/4556#c15 for more discussion.
+ s.appendError(errors.New("time.Time year outside of range [0,9999]"))
+ }
+ s.buf.WriteByte('"')
+ *s.buf = t.AppendFormat(*s.buf, time.RFC3339Nano)
+ s.buf.WriteByte('"')
+}
+
+func appendJSONValue(s *handleState, v Value) error {
+ switch v.Kind() {
+ case KindString:
+ s.appendString(v.str())
+ case KindInt64:
+ *s.buf = strconv.AppendInt(*s.buf, v.Int64(), 10)
+ case KindUint64:
+ *s.buf = strconv.AppendUint(*s.buf, v.Uint64(), 10)
+ case KindFloat64:
+ // json.Marshal is funny about floats; it doesn't
+ // always match strconv.AppendFloat. So just call it.
+ // That's expensive, but floats are rare.
+ if err := appendJSONMarshal(s.buf, v.Float64()); err != nil {
+ return err
+ }
+ case KindBool:
+ *s.buf = strconv.AppendBool(*s.buf, v.Bool())
+ case KindDuration:
+ // Do what json.Marshal does.
+ *s.buf = strconv.AppendInt(*s.buf, int64(v.Duration()), 10)
+ case KindTime:
+ s.appendTime(v.Time())
+ case KindAny:
+ a := v.Any()
+ _, jm := a.(json.Marshaler)
+ if err, ok := a.(error); ok && !jm {
+ s.appendString(err.Error())
+ } else {
+ return appendJSONMarshal(s.buf, a)
+ }
+ default:
+ panic(fmt.Sprintf("bad kind: %s", v.Kind()))
+ }
+ return nil
+}
+
+func appendJSONMarshal(buf *buffer.Buffer, v any) error {
+ // Use a json.Encoder to avoid escaping HTML.
+ var bb bytes.Buffer
+ enc := json.NewEncoder(&bb)
+ enc.SetEscapeHTML(false)
+ if err := enc.Encode(v); err != nil {
+ return err
+ }
+ bs := bb.Bytes()
+ buf.Write(bs[:len(bs)-1]) // remove final newline
+ return nil
+}
+
+// appendEscapedJSONString escapes s for JSON and appends it to buf.
+// It does not surround the string in quotation marks.
+//
+// Modified from encoding/json/encode.go:encodeState.string,
+// with escapeHTML set to false.
+func appendEscapedJSONString(buf []byte, s string) []byte {
+ char := func(b byte) { buf = append(buf, b) }
+ str := func(s string) { buf = append(buf, s...) }
+
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if safeSet[b] {
+ i++
+ continue
+ }
+ if start < i {
+ str(s[start:i])
+ }
+ char('\\')
+ switch b {
+ case '\\', '"':
+ char(b)
+ case '\n':
+ char('n')
+ case '\r':
+ char('r')
+ case '\t':
+ char('t')
+ default:
+ // This encodes bytes < 0x20 except for \t, \n and \r.
+ str(`u00`)
+ char(hex[b>>4])
+ char(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ str(s[start:i])
+ }
+ str(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ str(s[start:i])
+ }
+ str(`\u202`)
+ char(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ str(s[start:])
+ }
+ return buf
+}
+
+var hex = "0123456789abcdef"
+
+// Copied from encoding/json/tables.go.
+//
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+ ' ': true,
+ '!': true,
+ '"': false,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '(': true,
+ ')': true,
+ '*': true,
+ '+': true,
+ ',': true,
+ '-': true,
+ '.': true,
+ '/': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ ':': true,
+ ';': true,
+ '<': true,
+ '=': true,
+ '>': true,
+ '?': true,
+ '@': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '[': true,
+ '\\': false,
+ ']': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '{': true,
+ '|': true,
+ '}': true,
+ '~': true,
+ '\u007f': true,
+}
diff --git a/vendor/golang.org/x/exp/slog/level.go b/vendor/golang.org/x/exp/slog/level.go
new file mode 100644
index 0000000..b2365f0
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/level.go
@@ -0,0 +1,201 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slog
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync/atomic"
+)
+
+// A Level is the importance or severity of a log event.
+// The higher the level, the more important or severe the event.
+type Level int
+
+// Level numbers are inherently arbitrary,
+// but we picked them to satisfy three constraints.
+// Any system can map them to another numbering scheme if it wishes.
+//
+// First, we wanted the default level to be Info. Since Levels are ints, Info is
+// the default value for int, zero.
+//
+// Second, we wanted to make it easy to use levels to specify logger verbosity.
+// Since a larger level means a more severe event, a logger that accepts events
+// with smaller (or more negative) level means a more verbose logger. Logger
+// verbosity is thus the negation of event severity, and the default verbosity
+// of 0 accepts all events at least as severe as INFO.
+//
+// Third, we wanted some room between levels to accommodate schemes with named
+// levels between ours. For example, Google Cloud Logging defines a Notice level
+// between Info and Warn. Since there are only a few of these intermediate
+// levels, the gap between the numbers need not be large. Our gap of 4 matches
+// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the
+// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog
+// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog
+// does not. But those OpenTelemetry levels can still be represented as slog
+// Levels by using the appropriate integers.
+//
+// Names for common levels.
+const (
+ LevelDebug Level = -4
+ LevelInfo Level = 0
+ LevelWarn Level = 4
+ LevelError Level = 8
+)
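+
+// For instance (editor's illustration): OpenTelemetry's INFO severity is 9 and
+// its WARN is 13, so subtracting 9 maps them to LevelInfo (0) and LevelWarn (4)
+// as described above.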
+
+// String returns a name for the level.
+// If the level has a name, then that name
+// in uppercase is returned.
+// If the level is between named values, then
+// an integer is appended to the uppercased name.
+// Examples:
+//
+// LevelWarn.String() => "WARN"
+// (LevelInfo+2).String() => "INFO+2"
+func (l Level) String() string {
+ str := func(base string, val Level) string {
+ if val == 0 {
+ return base
+ }
+ return fmt.Sprintf("%s%+d", base, val)
+ }
+
+ switch {
+ case l < LevelInfo:
+ return str("DEBUG", l-LevelDebug)
+ case l < LevelWarn:
+ return str("INFO", l-LevelInfo)
+ case l < LevelError:
+ return str("WARN", l-LevelWarn)
+ default:
+ return str("ERROR", l-LevelError)
+ }
+}
+
+// MarshalJSON implements [encoding/json.Marshaler]
+// by quoting the output of [Level.String].
+func (l Level) MarshalJSON() ([]byte, error) {
+ // AppendQuote is sufficient for JSON-encoding all Level strings.
+ // They don't contain any runes that would produce invalid JSON
+ // when escaped.
+ return strconv.AppendQuote(nil, l.String()), nil
+}
+
+// UnmarshalJSON implements [encoding/json.Unmarshaler].
+// It accepts any string produced by [Level.MarshalJSON],
+// ignoring case.
+// It also accepts numeric offsets that would result in a different string on
+// output. For example, "Error-8" would marshal as "INFO".
+func (l *Level) UnmarshalJSON(data []byte) error {
+ s, err := strconv.Unquote(string(data))
+ if err != nil {
+ return err
+ }
+ return l.parse(s)
+}
+
+// MarshalText implements [encoding.TextMarshaler]
+// by calling [Level.String].
+func (l Level) MarshalText() ([]byte, error) {
+ return []byte(l.String()), nil
+}
+
+// UnmarshalText implements [encoding.TextUnmarshaler].
+// It accepts any string produced by [Level.MarshalText],
+// ignoring case.
+// It also accepts numeric offsets that would result in a different string on
+// output. For example, "Error-8" would marshal as "INFO".
+func (l *Level) UnmarshalText(data []byte) error {
+ return l.parse(string(data))
+}
+
+func (l *Level) parse(s string) (err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("slog: level string %q: %w", s, err)
+ }
+ }()
+
+ name := s
+ offset := 0
+ if i := strings.IndexAny(s, "+-"); i >= 0 {
+ name = s[:i]
+ offset, err = strconv.Atoi(s[i:])
+ if err != nil {
+ return err
+ }
+ }
+ switch strings.ToUpper(name) {
+ case "DEBUG":
+ *l = LevelDebug
+ case "INFO":
+ *l = LevelInfo
+ case "WARN":
+ *l = LevelWarn
+ case "ERROR":
+ *l = LevelError
+ default:
+ return errors.New("unknown name")
+ }
+ *l += Level(offset)
+ return nil
+}
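+
+// For example (editor's illustration): parse("warn-1") sets *l to LevelWarn-1,
+// which [Level.String] renders as "INFO+3".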
+
+// Level returns the receiver.
+// It implements Leveler.
+func (l Level) Level() Level { return l }
+
+// A LevelVar is a Level variable, to allow a Handler level to change
+// dynamically.
+// It implements Leveler as well as a Set method,
+// and it is safe for use by multiple goroutines.
+// The zero LevelVar corresponds to LevelInfo.
+type LevelVar struct {
+ val atomic.Int64
+}
+
+// Level returns v's level.
+func (v *LevelVar) Level() Level {
+ return Level(int(v.val.Load()))
+}
+
+// Set sets v's level to l.
+func (v *LevelVar) Set(l Level) {
+ v.val.Store(int64(l))
+}
+
+func (v *LevelVar) String() string {
+ return fmt.Sprintf("LevelVar(%s)", v.Level())
+}
+
+// MarshalText implements [encoding.TextMarshaler]
+// by calling [Level.MarshalText].
+func (v *LevelVar) MarshalText() ([]byte, error) {
+ return v.Level().MarshalText()
+}
+
+// UnmarshalText implements [encoding.TextUnmarshaler]
+// by calling [Level.UnmarshalText].
+func (v *LevelVar) UnmarshalText(data []byte) error {
+ var l Level
+ if err := l.UnmarshalText(data); err != nil {
+ return err
+ }
+ v.Set(l)
+ return nil
+}
+
+// A Leveler provides a Level value.
+//
+// As Level itself implements Leveler, clients typically supply
+// a Level value wherever a Leveler is needed, such as in HandlerOptions.
+// Clients who need to vary the level dynamically can provide a more complex
+// Leveler implementation such as *LevelVar.
+type Leveler interface {
+ Level() Level
+}
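+
+// Illustrative use of LevelVar (editor's sketch, not part of the vendored
+// source; it assumes the Level field of this package's HandlerOptions and
+// NewTextHandler from text_handler.go):
+//
+//	var lv LevelVar // zero value is LevelInfo
+//	logger := New(NewTextHandler(os.Stderr, &HandlerOptions{Level: &lv}))
+//	lv.Set(LevelDebug) // logger now emits debug records as well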
diff --git a/vendor/golang.org/x/exp/slog/logger.go b/vendor/golang.org/x/exp/slog/logger.go
new file mode 100644
index 0000000..e87ec99
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/logger.go
@@ -0,0 +1,343 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slog
+
+import (
+ "context"
+ "log"
+ "runtime"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/exp/slog/internal"
+)
+
+var defaultLogger atomic.Value
+
+func init() {
+ defaultLogger.Store(New(newDefaultHandler(log.Output)))
+}
+
+// Default returns the default Logger.
+func Default() *Logger { return defaultLogger.Load().(*Logger) }
+
+// SetDefault makes l the default Logger.
+// After this call, output from the log package's default Logger
+// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler.
+func SetDefault(l *Logger) {
+ defaultLogger.Store(l)
+ // If the default's handler is a defaultHandler, then don't use a handleWriter,
+ // or we'll deadlock as they both try to acquire the log default mutex.
+ // The defaultHandler will use whatever the log default writer is currently
+ // set to, which is correct.
+ // This can occur with SetDefault(Default()).
+ // See TestSetDefault.
+ if _, ok := l.Handler().(*defaultHandler); !ok {
+ capturePC := log.Flags()&(log.Lshortfile|log.Llongfile) != 0
+ log.SetOutput(&handlerWriter{l.Handler(), LevelInfo, capturePC})
+ log.SetFlags(0) // we want just the log message, no time or location
+ }
+}
+
+// handlerWriter is an io.Writer that calls a Handler.
+// It is used to link the default log.Logger to the default slog.Logger.
+type handlerWriter struct {
+ h Handler
+ level Level
+ capturePC bool
+}
+
+func (w *handlerWriter) Write(buf []byte) (int, error) {
+ if !w.h.Enabled(context.Background(), w.level) {
+ return 0, nil
+ }
+ var pc uintptr
+ if !internal.IgnorePC && w.capturePC {
+ // skip [runtime.Callers, w.Write, Logger.Output, log.Print]
+ var pcs [1]uintptr
+ runtime.Callers(4, pcs[:])
+ pc = pcs[0]
+ }
+
+ // Remove final newline.
+ origLen := len(buf) // Report that the entire buf was written.
+ if len(buf) > 0 && buf[len(buf)-1] == '\n' {
+ buf = buf[:len(buf)-1]
+ }
+ r := NewRecord(time.Now(), w.level, string(buf), pc)
+ return origLen, w.h.Handle(context.Background(), r)
+}
+
+// A Logger records structured information about each call to its
+// Log, Debug, Info, Warn, and Error methods.
+// For each call, it creates a Record and passes it to a Handler.
+//
+// To create a new Logger, call [New] or a Logger method
+// that begins "With".
+type Logger struct {
+ handler Handler // for structured logging
+}
+
+func (l *Logger) clone() *Logger {
+ c := *l
+ return &c
+}
+
+// Handler returns l's Handler.
+func (l *Logger) Handler() Handler { return l.handler }
+
+// With returns a new Logger that includes the given arguments, converted to
+// Attrs as in [Logger.Log].
+// The Attrs will be added to each output from the Logger.
+// The new Logger shares the old Logger's context.
+// The new Logger's handler is the result of calling WithAttrs on the receiver's
+// handler.
+func (l *Logger) With(args ...any) *Logger {
+ c := l.clone()
+ c.handler = l.handler.WithAttrs(argsToAttrSlice(args))
+ return c
+}
+
+// WithGroup returns a new Logger that starts a group. The keys of all
+// attributes added to the Logger will be qualified by the given name.
+// (How that qualification happens depends on the [Handler.WithGroup]
+// method of the Logger's Handler.)
+// The new Logger shares the old Logger's context.
+//
+// The new Logger's handler is the result of calling WithGroup on the receiver's
+// handler.
+func (l *Logger) WithGroup(name string) *Logger {
+ c := l.clone()
+ c.handler = l.handler.WithGroup(name)
+ return c
+}
+
+// New creates a new Logger with the given non-nil Handler and a nil context.
+func New(h Handler) *Logger {
+ if h == nil {
+ panic("nil Handler")
+ }
+ return &Logger{handler: h}
+}
+
+// With calls Logger.With on the default logger.
+func With(args ...any) *Logger {
+ return Default().With(args...)
+}
+
+// Enabled reports whether l emits log records at the given context and level.
+func (l *Logger) Enabled(ctx context.Context, level Level) bool {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ return l.Handler().Enabled(ctx, level)
+}
+
+// NewLogLogger returns a new log.Logger such that each call to its Output method
+// dispatches a Record to the specified handler. The logger acts as a bridge from
+// the older log API to newer structured logging handlers.
+func NewLogLogger(h Handler, level Level) *log.Logger {
+ return log.New(&handlerWriter{h, level, true}, "", 0)
+}
+
+// Log emits a log record with the current time and the given level and message.
+// The Record's Attrs consist of the Logger's attributes followed by
+// the Attrs specified by args.
+//
+// The attribute arguments are processed as follows:
+// - If an argument is an Attr, it is used as is.
+// - If an argument is a string and this is not the last argument,
+// the following argument is treated as the value and the two are combined
+// into an Attr.
+// - Otherwise, the argument is treated as a value with key "!BADKEY".
+func (l *Logger) Log(ctx context.Context, level Level, msg string, args ...any) {
+ l.log(ctx, level, msg, args...)
+}
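+
+// Illustrative call (editor's sketch; Int is this package's int Attr
+// constructor):
+//
+//	logger.Log(ctx, LevelInfo, "login", "user", "alice", Int("tries", 2), 3.14)
+//
+// "user" and "alice" combine into one Attr, Int("tries", 2) is used as is, and
+// the trailing 3.14 becomes an Attr with key "!BADKEY".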
+
+// LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs.
+func (l *Logger) LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
+ l.logAttrs(ctx, level, msg, attrs...)
+}
+
+// Debug logs at LevelDebug.
+func (l *Logger) Debug(msg string, args ...any) {
+ l.log(nil, LevelDebug, msg, args...)
+}
+
+// DebugContext logs at LevelDebug with the given context.
+func (l *Logger) DebugContext(ctx context.Context, msg string, args ...any) {
+ l.log(ctx, LevelDebug, msg, args...)
+}
+
+// DebugCtx logs at LevelDebug with the given context.
+// Deprecated: Use Logger.DebugContext.
+func (l *Logger) DebugCtx(ctx context.Context, msg string, args ...any) {
+ l.log(ctx, LevelDebug, msg, args...)
+}
+
+// Info logs at LevelInfo.
+func (l *Logger) Info(msg string, args ...any) {
+ l.log(nil, LevelInfo, msg, args...)
+}
+
+// InfoContext logs at LevelInfo with the given context.
+func (l *Logger) InfoContext(ctx context.Context, msg string, args ...any) {
+ l.log(ctx, LevelInfo, msg, args...)
+}
+
+// InfoCtx logs at LevelInfo with the given context.
+// Deprecated: Use Logger.InfoContext.
+func (l *Logger) InfoCtx(ctx context.Context, msg string, args ...any) {
+ l.log(ctx, LevelInfo, msg, args...)
+}
+
+// Warn logs at LevelWarn.
+func (l *Logger) Warn(msg string, args ...any) {
+ l.log(nil, LevelWarn, msg, args...)
+}
+
+// WarnContext logs at LevelWarn with the given context.
+func (l *Logger) WarnContext(ctx context.Context, msg string, args ...any) {
+ l.log(ctx, LevelWarn, msg, args...)
+}
+
+// WarnCtx logs at LevelWarn with the given context.
+// Deprecated: Use Logger.WarnContext.
+func (l *Logger) WarnCtx(ctx context.Context, msg string, args ...any) {
+ l.log(ctx, LevelWarn, msg, args...)
+}
+
+// Error logs at LevelError.
+func (l *Logger) Error(msg string, args ...any) {
+ l.log(nil, LevelError, msg, args...)
+}
+
+// ErrorContext logs at LevelError with the given context.
+func (l *Logger) ErrorContext(ctx context.Context, msg string, args ...any) {
+ l.log(ctx, LevelError, msg, args...)
+}
+
+// ErrorCtx logs at LevelError with the given context.
+// Deprecated: Use Logger.ErrorContext.
+func (l *Logger) ErrorCtx(ctx context.Context, msg string, args ...any) {
+ l.log(ctx, LevelError, msg, args...)
+}
+
+// log is the low-level logging method for methods that take ...any.
+// It must always be called directly by an exported logging method
+// or function, because it uses a fixed call depth to obtain the pc.
+func (l *Logger) log(ctx context.Context, level Level, msg string, args ...any) {
+ if !l.Enabled(ctx, level) {
+ return
+ }
+ var pc uintptr
+ if !internal.IgnorePC {
+ var pcs [1]uintptr
+ // skip [runtime.Callers, this function, this function's caller]
+ runtime.Callers(3, pcs[:])
+ pc = pcs[0]
+ }
+ r := NewRecord(time.Now(), level, msg, pc)
+ r.Add(args...)
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ _ = l.Handler().Handle(ctx, r)
+}
+
+// logAttrs is like [Logger.log], but for methods that take ...Attr.
+func (l *Logger) logAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
+ if !l.Enabled(ctx, level) {
+ return
+ }
+ var pc uintptr
+ if !internal.IgnorePC {
+ var pcs [1]uintptr
+ // skip [runtime.Callers, this function, this function's caller]
+ runtime.Callers(3, pcs[:])
+ pc = pcs[0]
+ }
+ r := NewRecord(time.Now(), level, msg, pc)
+ r.AddAttrs(attrs...)
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ _ = l.Handler().Handle(ctx, r)
+}
+
+// Debug calls Logger.Debug on the default logger.
+func Debug(msg string, args ...any) {
+ Default().log(nil, LevelDebug, msg, args...)
+}
+
+// DebugContext calls Logger.DebugContext on the default logger.
+func DebugContext(ctx context.Context, msg string, args ...any) {
+ Default().log(ctx, LevelDebug, msg, args...)
+}
+
+// Info calls Logger.Info on the default logger.
+func Info(msg string, args ...any) {
+ Default().log(nil, LevelInfo, msg, args...)
+}
+
+// InfoContext calls Logger.InfoContext on the default logger.
+func InfoContext(ctx context.Context, msg string, args ...any) {
+ Default().log(ctx, LevelInfo, msg, args...)
+}
+
+// Warn calls Logger.Warn on the default logger.
+func Warn(msg string, args ...any) {
+ Default().log(nil, LevelWarn, msg, args...)
+}
+
+// WarnContext calls Logger.WarnContext on the default logger.
+func WarnContext(ctx context.Context, msg string, args ...any) {
+ Default().log(ctx, LevelWarn, msg, args...)
+}
+
+// Error calls Logger.Error on the default logger.
+func Error(msg string, args ...any) {
+ Default().log(nil, LevelError, msg, args...)
+}
+
+// ErrorContext calls Logger.ErrorContext on the default logger.
+func ErrorContext(ctx context.Context, msg string, args ...any) {
+ Default().log(ctx, LevelError, msg, args...)
+}
+
+// DebugCtx calls Logger.DebugContext on the default logger.
+// Deprecated: call DebugContext.
+func DebugCtx(ctx context.Context, msg string, args ...any) {
+ Default().log(ctx, LevelDebug, msg, args...)
+}
+
+// InfoCtx calls Logger.InfoContext on the default logger.
+// Deprecated: call InfoContext.
+func InfoCtx(ctx context.Context, msg string, args ...any) {
+ Default().log(ctx, LevelInfo, msg, args...)
+}
+
+// WarnCtx calls Logger.WarnContext on the default logger.
+// Deprecated: call WarnContext.
+func WarnCtx(ctx context.Context, msg string, args ...any) {
+ Default().log(ctx, LevelWarn, msg, args...)
+}
+
+// ErrorCtx calls Logger.ErrorContext on the default logger.
+// Deprecated: call ErrorContext.
+func ErrorCtx(ctx context.Context, msg string, args ...any) {
+ Default().log(ctx, LevelError, msg, args...)
+}
+
+// Log calls Logger.Log on the default logger.
+func Log(ctx context.Context, level Level, msg string, args ...any) {
+ Default().log(ctx, level, msg, args...)
+}
+
+// LogAttrs calls Logger.LogAttrs on the default logger.
+func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
+ Default().logAttrs(ctx, level, msg, attrs...)
+}
diff --git a/vendor/golang.org/x/exp/slog/noplog.bench b/vendor/golang.org/x/exp/slog/noplog.bench
new file mode 100644
index 0000000..ed9296f
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/noplog.bench
@@ -0,0 +1,36 @@
+goos: linux
+goarch: amd64
+pkg: golang.org/x/exp/slog
+cpu: Intel(R) Xeon(R) CPU @ 2.20GHz
+BenchmarkNopLog/attrs-8 1000000 1090 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/attrs-8 1000000 1097 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/attrs-8 1000000 1078 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/attrs-8 1000000 1095 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/attrs-8 1000000 1096 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/attrs-parallel-8 4007268 308.2 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/attrs-parallel-8 4016138 299.7 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/attrs-parallel-8 4020529 305.9 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/attrs-parallel-8 3977829 303.4 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/attrs-parallel-8 3225438 318.5 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/keys-values-8 1179256 994.2 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/keys-values-8 1000000 1002 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/keys-values-8 1216710 993.2 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/keys-values-8 1000000 1013 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/keys-values-8 1000000 1016 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/WithContext-8 989066 1163 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/WithContext-8 994116 1163 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/WithContext-8 1000000 1152 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/WithContext-8 991675 1165 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/WithContext-8 965268 1166 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/WithContext-parallel-8 3955503 303.3 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/WithContext-parallel-8 3861188 307.8 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/WithContext-parallel-8 3967752 303.9 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/WithContext-parallel-8 3955203 302.7 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/WithContext-parallel-8 3948278 301.1 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/Ctx-8 940622 1247 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/Ctx-8 936381 1257 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/Ctx-8 959730 1266 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/Ctx-8 943473 1290 ns/op 0 B/op 0 allocs/op
+BenchmarkNopLog/Ctx-8 919414 1259 ns/op 0 B/op 0 allocs/op
+PASS
+ok golang.org/x/exp/slog 40.566s
diff --git a/vendor/golang.org/x/exp/slog/record.go b/vendor/golang.org/x/exp/slog/record.go
new file mode 100644
index 0000000..38b3440
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/record.go
@@ -0,0 +1,207 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slog
+
+import (
+ "runtime"
+ "time"
+
+ "golang.org/x/exp/slices"
+)
+
+const nAttrsInline = 5
+
+// A Record holds information about a log event.
+// Copies of a Record share state.
+// Do not modify a Record after handing out a copy of it.
+// Use [Record.Clone] to create a copy with no shared state.
+type Record struct {
+ // The time at which the output method (Log, Info, etc.) was called.
+ Time time.Time
+
+ // The log message.
+ Message string
+
+ // The level of the event.
+ Level Level
+
+ // The program counter at the time the record was constructed, as determined
+ // by runtime.Callers. If zero, no program counter is available.
+ //
+ // The only valid use for this value is as an argument to
+ // [runtime.CallersFrames]. In particular, it must not be passed to
+ // [runtime.FuncForPC].
+ PC uintptr
+
+ // Allocation optimization: an inline array sized to hold
+ // the majority of log calls (based on examination of open-source
+ // code). It holds the start of the list of Attrs.
+ front [nAttrsInline]Attr
+
+ // The number of Attrs in front.
+ nFront int
+
+ // The list of Attrs except for those in front.
+ // Invariants:
+ // - len(back) > 0 iff nFront == len(front)
+ // - Unused array elements are zero. Used to detect mistakes.
+ back []Attr
+}
+
+// NewRecord creates a Record from the given arguments.
+// Use [Record.AddAttrs] to add attributes to the Record.
+//
+// NewRecord is intended for logging APIs that want to support a [Handler] as
+// a backend.
+func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record {
+ return Record{
+ Time: t,
+ Message: msg,
+ Level: level,
+ PC: pc,
+ }
+}
+
+// Clone returns a copy of the record with no shared state.
+// The original record and the clone can both be modified
+// without interfering with each other.
+func (r Record) Clone() Record {
+ r.back = slices.Clip(r.back) // prevent append from mutating shared array
+ return r
+}
+
+// NumAttrs returns the number of attributes in the Record.
+func (r Record) NumAttrs() int {
+ return r.nFront + len(r.back)
+}
+
+// Attrs calls f on each Attr in the Record.
+// Iteration stops if f returns false.
+func (r Record) Attrs(f func(Attr) bool) {
+ for i := 0; i < r.nFront; i++ {
+ if !f(r.front[i]) {
+ return
+ }
+ }
+ for _, a := range r.back {
+ if !f(a) {
+ return
+ }
+ }
+}
+
+// AddAttrs appends the given Attrs to the Record's list of Attrs.
+func (r *Record) AddAttrs(attrs ...Attr) {
+ n := copy(r.front[r.nFront:], attrs)
+ r.nFront += n
+ // Check if a copy was modified by slicing past the end
+ // and seeing if the Attr there is non-zero.
+ if cap(r.back) > len(r.back) {
+ end := r.back[:len(r.back)+1][len(r.back)]
+ if !end.isEmpty() {
+ panic("copies of a slog.Record were both modified")
+ }
+ }
+ r.back = append(r.back, attrs[n:]...)
+}
+
+// Add converts the args to Attrs as described in [Logger.Log],
+// then appends the Attrs to the Record's list of Attrs.
+func (r *Record) Add(args ...any) {
+ var a Attr
+ for len(args) > 0 {
+ a, args = argsToAttr(args)
+ if r.nFront < len(r.front) {
+ r.front[r.nFront] = a
+ r.nFront++
+ } else {
+ if r.back == nil {
+ r.back = make([]Attr, 0, countAttrs(args))
+ }
+ r.back = append(r.back, a)
+ }
+ }
+}
+
+// countAttrs returns the number of Attrs that would be created from args.
+func countAttrs(args []any) int {
+ n := 0
+ for i := 0; i < len(args); i++ {
+ n++
+ if _, ok := args[i].(string); ok {
+ i++
+ }
+ }
+ return n
+}
+
+const badKey = "!BADKEY"
+
+// argsToAttr turns a prefix of the nonempty args slice into an Attr
+// and returns the unconsumed portion of the slice.
+// If args[0] is an Attr, it returns it.
+// If args[0] is a string, it treats the first two elements as
+// a key-value pair.
+// Otherwise, it treats args[0] as a value with a missing key.
+func argsToAttr(args []any) (Attr, []any) {
+ switch x := args[0].(type) {
+ case string:
+ if len(args) == 1 {
+ return String(badKey, x), nil
+ }
+ return Any(x, args[1]), args[2:]
+
+ case Attr:
+ return x, args[1:]
+
+ default:
+ return Any(badKey, x), args[1:]
+ }
+}
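+
+// For example (editor's illustration): argsToAttr([]any{"k", 1, "next"})
+// returns Any("k", 1) and []any{"next"}, while argsToAttr([]any{"k"}) returns
+// String("!BADKEY", "k") and nil.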
+
+// Source describes the location of a line of source code.
+type Source struct {
+ // Function is the package path-qualified function name containing the
+ // source line. If non-empty, this string uniquely identifies a single
+ // function in the program. This may be the empty string if not known.
+ Function string `json:"function"`
+ // File and Line are the file name and line number (1-based) of the source
+ // line. These may be the empty string and zero, respectively, if not known.
+ File string `json:"file"`
+ Line int `json:"line"`
+}
+
+// group returns the non-zero fields of s as a group Value of attrs.
+// It is similar to a LogValue method, but we don't want Source
+// to implement LogValuer because it would be resolved before
+// the ReplaceAttr function was called.
+func (s *Source) group() Value {
+ var as []Attr
+ if s.Function != "" {
+ as = append(as, String("function", s.Function))
+ }
+ if s.File != "" {
+ as = append(as, String("file", s.File))
+ }
+ if s.Line != 0 {
+ as = append(as, Int("line", s.Line))
+ }
+ return GroupValue(as...)
+}
+
+// source returns a Source for the log event.
+// If the Record was created without the necessary information,
+// or if the location is unavailable, it returns a non-nil *Source
+// with zero fields.
+func (r Record) source() *Source {
+ fs := runtime.CallersFrames([]uintptr{r.PC})
+ f, _ := fs.Next()
+ return &Source{
+ Function: f.Function,
+ File: f.File,
+ Line: f.Line,
+ }
+}
diff --git a/vendor/golang.org/x/exp/slog/text_handler.go b/vendor/golang.org/x/exp/slog/text_handler.go
new file mode 100644
index 0000000..75b66b7
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/text_handler.go
@@ -0,0 +1,161 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slog
+
+import (
+ "context"
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "unicode"
+ "unicode/utf8"
+)
+
+// TextHandler is a Handler that writes Records to an io.Writer as a
+// sequence of key=value pairs separated by spaces and followed by a newline.
+type TextHandler struct {
+ *commonHandler
+}
+
+// NewTextHandler creates a TextHandler that writes to w,
+// using the given options.
+// If opts is nil, the default options are used.
+func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler {
+ if opts == nil {
+ opts = &HandlerOptions{}
+ }
+ return &TextHandler{
+ &commonHandler{
+ json: false,
+ w: w,
+ opts: *opts,
+ },
+ }
+}
+
+// Enabled reports whether the handler handles records at the given level.
+// The handler ignores records whose level is lower.
+func (h *TextHandler) Enabled(_ context.Context, level Level) bool {
+ return h.commonHandler.enabled(level)
+}
+
+// WithAttrs returns a new TextHandler whose attributes consist
+// of h's attributes followed by attrs.
+func (h *TextHandler) WithAttrs(attrs []Attr) Handler {
+ return &TextHandler{commonHandler: h.commonHandler.withAttrs(attrs)}
+}
+
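+// WithGroup returns a new TextHandler that starts the given group.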
+func (h *TextHandler) WithGroup(name string) Handler {
+ return &TextHandler{commonHandler: h.commonHandler.withGroup(name)}
+}
+
+// Handle formats its argument Record as a single line of space-separated
+// key=value items.
+//
+// If the Record's time is zero, the time is omitted.
+// Otherwise, the key is "time"
+// and the value is output in RFC3339 format with millisecond precision.
+//
+// If the Record's level is zero, the level is omitted.
+// Otherwise, the key is "level"
+// and the value of [Level.String] is output.
+//
+// If the AddSource option is set and source information is available,
+// the key is "source" and the value is output as FILE:LINE.
+//
+// The message's key is "msg".
+//
+// To modify these or other attributes, or remove them from the output, use
+// [HandlerOptions.ReplaceAttr].
+//
+// If a value implements [encoding.TextMarshaler], the result of MarshalText is
+// written. Otherwise, the result of fmt.Sprint is written.
+//
+// Keys and values are quoted with [strconv.Quote] if they contain Unicode space
+// characters, non-printing characters, '"' or '='.
+//
+// Keys inside groups consist of components (keys or group names) separated by
+// dots. No further escaping is performed.
+// Thus there is no way to determine from the key "a.b.c" whether there
+// are two groups "a" and "b" and a key "c", or a single group "a.b" and a key "c",
+// or a single group "a" and a key "b.c".
+// If it is necessary to reconstruct the group structure of a key
+// even in the presence of dots inside components, use
+// [HandlerOptions.ReplaceAttr] to encode that information in the key.
+//
+// Each call to Handle results in a single serialized call to
+// io.Writer.Write.
+func (h *TextHandler) Handle(_ context.Context, r Record) error {
+ return h.commonHandler.handle(r)
+}
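+
+// Example output line (editor's illustration of the format described above):
+//
+//	time=2023-05-01T12:04:05.000Z level=INFO msg="hello world" user.id=42
+//
+// The message is quoted because it contains a space, and "user.id" is how a
+// key "id" inside a group "user" is rendered.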
+
+func appendTextValue(s *handleState, v Value) error {
+ switch v.Kind() {
+ case KindString:
+ s.appendString(v.str())
+ case KindTime:
+ s.appendTime(v.time())
+ case KindAny:
+ if tm, ok := v.any.(encoding.TextMarshaler); ok {
+ data, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ // TODO: avoid the conversion to string.
+ s.appendString(string(data))
+ return nil
+ }
+ if bs, ok := byteSlice(v.any); ok {
+ // As of Go 1.19, this only allocates for strings longer than 32 bytes.
+ s.buf.WriteString(strconv.Quote(string(bs)))
+ return nil
+ }
+ s.appendString(fmt.Sprintf("%+v", v.Any()))
+ default:
+ *s.buf = v.append(*s.buf)
+ }
+ return nil
+}
+
+// byteSlice returns its argument as a []byte if the argument's
+// underlying type is []byte, along with a second return value of true.
+// Otherwise it returns nil, false.
+func byteSlice(a any) ([]byte, bool) {
+ if bs, ok := a.([]byte); ok {
+ return bs, true
+ }
+ // Like Printf's %s, we allow both the slice type and the byte element type to be named.
+ t := reflect.TypeOf(a)
+ if t != nil && t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
+ return reflect.ValueOf(a).Bytes(), true
+ }
+ return nil, false
+}
+
+func needsQuoting(s string) bool {
+ if len(s) == 0 {
+ return true
+ }
+ for i := 0; i < len(s); {
+ b := s[i]
+ if b < utf8.RuneSelf {
+ // Quote anything except a backslash that would need quoting in a
+ // JSON string, as well as space and '='
+ if b != '\\' && (b == ' ' || b == '=' || !safeSet[b]) {
+ return true
+ }
+ i++
+ continue
+ }
+ r, size := utf8.DecodeRuneInString(s[i:])
+ if r == utf8.RuneError || unicode.IsSpace(r) || !unicode.IsPrint(r) {
+ return true
+ }
+ i += size
+ }
+ return false
+}
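+
+// For example (editor's illustration): needsQuoting("msg") is false, while
+// needsQuoting(""), needsQuoting("a b"), and needsQuoting("k=v") are all true.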
diff --git a/vendor/golang.org/x/exp/slog/value.go b/vendor/golang.org/x/exp/slog/value.go
new file mode 100644
index 0000000..3550c46
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/value.go
@@ -0,0 +1,456 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slog
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+ "unsafe"
+
+ "golang.org/x/exp/slices"
+)
+
+// A Value can represent any Go value, but unlike type any,
+// it can represent most small values without an allocation.
+// The zero Value corresponds to nil.
+type Value struct {
+ _ [0]func() // disallow ==
+ // num holds the value for Kinds Int64, Uint64, Float64, Bool and Duration,
+ // the string length for KindString, and nanoseconds since the epoch for KindTime.
+ num uint64
+ // If any is of type Kind, then the value is in num as described above.
+ // If any is of type *time.Location, then the Kind is Time and time.Time value
+ // can be constructed from the Unix nanos in num and the location (monotonic time
+ // is not preserved).
+ // If any is of type stringptr, then the Kind is String and the string value
+ // consists of the length in num and the pointer in any.
+ // Otherwise, the Kind is Any and any is the value.
+ // (This implies that Attrs cannot store values of type Kind, *time.Location
+ // or stringptr.)
+ any any
+}
+
+// Kind is the kind of a Value.
+type Kind int
+
+// The following list is mostly sorted alphabetically; what matters is that
+// KindAny is 0 so that a zero Value represents nil.
+
+const (
+ KindAny Kind = iota
+ KindBool
+ KindDuration
+ KindFloat64
+ KindInt64
+ KindString
+ KindTime
+ KindUint64
+ KindGroup
+ KindLogValuer
+)
+
+var kindStrings = []string{
+ "Any",
+ "Bool",
+ "Duration",
+ "Float64",
+ "Int64",
+ "String",
+ "Time",
+ "Uint64",
+ "Group",
+ "LogValuer",
+}
+
+func (k Kind) String() string {
+ if k >= 0 && int(k) < len(kindStrings) {
+ return kindStrings[k]
+ }
+ return ""
+}
+
+// Unexported version of Kind, just so we can store Kinds in Values.
+// (No user-provided value has this type.)
+type kind Kind
+
+// Kind returns v's Kind.
+func (v Value) Kind() Kind {
+ switch x := v.any.(type) {
+ case Kind:
+ return x
+ case stringptr:
+ return KindString
+ case timeLocation:
+ return KindTime
+ case groupptr:
+ return KindGroup
+ case LogValuer:
+ return KindLogValuer
+ case kind: // a kind is just a wrapper for a Kind
+ return KindAny
+ default:
+ return KindAny
+ }
+}
+
+//////////////// Constructors
+
+// IntValue returns a Value for an int.
+func IntValue(v int) Value {
+ return Int64Value(int64(v))
+}
+
+// Int64Value returns a Value for an int64.
+func Int64Value(v int64) Value {
+ return Value{num: uint64(v), any: KindInt64}
+}
+
+// Uint64Value returns a Value for a uint64.
+func Uint64Value(v uint64) Value {
+ return Value{num: v, any: KindUint64}
+}
+
+// Float64Value returns a Value for a floating-point number.
+func Float64Value(v float64) Value {
+ return Value{num: math.Float64bits(v), any: KindFloat64}
+}
+
+// BoolValue returns a Value for a bool.
+func BoolValue(v bool) Value {
+ u := uint64(0)
+ if v {
+ u = 1
+ }
+ return Value{num: u, any: KindBool}
+}
+
+// Unexported version of *time.Location, just so we can store *time.Locations in
+// Values. (No user-provided value has this type.)
+type timeLocation *time.Location
+
+// TimeValue returns a Value for a time.Time.
+// It discards the monotonic portion.
+func TimeValue(v time.Time) Value {
+ if v.IsZero() {
+ // UnixNano on the zero time is undefined, so represent the zero time
+ // with a nil *time.Location instead. time.Time.Location method never
+ // returns nil, so a Value with any == timeLocation(nil) cannot be
+ // mistaken for any other Value, time.Time or otherwise.
+ return Value{any: timeLocation(nil)}
+ }
+ return Value{num: uint64(v.UnixNano()), any: timeLocation(v.Location())}
+}
+
+// DurationValue returns a Value for a time.Duration.
+func DurationValue(v time.Duration) Value {
+ return Value{num: uint64(v.Nanoseconds()), any: KindDuration}
+}
+
+// AnyValue returns a Value for the supplied value.
+//
+// If the supplied value is of type Value, it is returned
+// unmodified.
+//
+// Given a value of one of Go's predeclared string, bool, or
+// (non-complex) numeric types, AnyValue returns a Value of kind
+// String, Bool, Uint64, Int64, or Float64. The width of the
+// original numeric type is not preserved.
+//
+// Given a time.Time or time.Duration value, AnyValue returns a Value of kind
+// KindTime or KindDuration. The monotonic time is not preserved.
+//
+// For nil, or values of all other types, including named types whose
+// underlying type is numeric, AnyValue returns a value of kind KindAny.
+func AnyValue(v any) Value {
+ switch v := v.(type) {
+ case string:
+ return StringValue(v)
+ case int:
+ return Int64Value(int64(v))
+ case uint:
+ return Uint64Value(uint64(v))
+ case int64:
+ return Int64Value(v)
+ case uint64:
+ return Uint64Value(v)
+ case bool:
+ return BoolValue(v)
+ case time.Duration:
+ return DurationValue(v)
+ case time.Time:
+ return TimeValue(v)
+ case uint8:
+ return Uint64Value(uint64(v))
+ case uint16:
+ return Uint64Value(uint64(v))
+ case uint32:
+ return Uint64Value(uint64(v))
+ case uintptr:
+ return Uint64Value(uint64(v))
+ case int8:
+ return Int64Value(int64(v))
+ case int16:
+ return Int64Value(int64(v))
+ case int32:
+ return Int64Value(int64(v))
+ case float64:
+ return Float64Value(v)
+ case float32:
+ return Float64Value(float64(v))
+ case []Attr:
+ return GroupValue(v...)
+ case Kind:
+ return Value{any: kind(v)}
+ case Value:
+ return v
+ default:
+ return Value{any: v}
+ }
+}
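+
+// For example (editor's illustration): AnyValue(int16(7)).Kind() is KindInt64
+// and AnyValue(float32(1.5)).Kind() is KindFloat64; the original widths are
+// not preserved.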
+
+//////////////// Accessors
+
+// Any returns v's value as an any.
+func (v Value) Any() any {
+ switch v.Kind() {
+ case KindAny:
+ if k, ok := v.any.(kind); ok {
+ return Kind(k)
+ }
+ return v.any
+ case KindLogValuer:
+ return v.any
+ case KindGroup:
+ return v.group()
+ case KindInt64:
+ return int64(v.num)
+ case KindUint64:
+ return v.num
+ case KindFloat64:
+ return v.float()
+ case KindString:
+ return v.str()
+ case KindBool:
+ return v.bool()
+ case KindDuration:
+ return v.duration()
+ case KindTime:
+ return v.time()
+ default:
+ panic(fmt.Sprintf("bad kind: %s", v.Kind()))
+ }
+}
+
+// Int64 returns v's value as an int64. It panics
+// if v is not a signed integer.
+func (v Value) Int64() int64 {
+ if g, w := v.Kind(), KindInt64; g != w {
+ panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
+ }
+ return int64(v.num)
+}
+
+// Uint64 returns v's value as a uint64. It panics
+// if v is not an unsigned integer.
+func (v Value) Uint64() uint64 {
+ if g, w := v.Kind(), KindUint64; g != w {
+ panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
+ }
+ return v.num
+}
+
+// Bool returns v's value as a bool. It panics
+// if v is not a bool.
+func (v Value) Bool() bool {
+ if g, w := v.Kind(), KindBool; g != w {
+ panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
+ }
+ return v.bool()
+}
+
+func (v Value) bool() bool {
+ return v.num == 1
+}
+
+// Duration returns v's value as a time.Duration. It panics
+// if v is not a time.Duration.
+func (v Value) Duration() time.Duration {
+ if g, w := v.Kind(), KindDuration; g != w {
+ panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
+ }
+
+ return v.duration()
+}
+
+func (v Value) duration() time.Duration {
+ return time.Duration(int64(v.num))
+}
+
+// Float64 returns v's value as a float64. It panics
+// if v is not a float64.
+func (v Value) Float64() float64 {
+ if g, w := v.Kind(), KindFloat64; g != w {
+ panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
+ }
+
+ return v.float()
+}
+
+func (v Value) float() float64 {
+ return math.Float64frombits(v.num)
+}
+
+// Time returns v's value as a time.Time. It panics
+// if v is not a time.Time.
+func (v Value) Time() time.Time {
+ if g, w := v.Kind(), KindTime; g != w {
+ panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
+ }
+ return v.time()
+}
+
+func (v Value) time() time.Time {
+ loc := v.any.(timeLocation)
+ if loc == nil {
+ return time.Time{}
+ }
+ return time.Unix(0, int64(v.num)).In(loc)
+}
+
+// LogValuer returns v's value as a LogValuer. It panics
+// if v is not a LogValuer.
+func (v Value) LogValuer() LogValuer {
+ return v.any.(LogValuer)
+}
+
+// Group returns v's value as a []Attr.
+// It panics if v's Kind is not KindGroup.
+func (v Value) Group() []Attr {
+ if sp, ok := v.any.(groupptr); ok {
+ return unsafe.Slice((*Attr)(sp), v.num)
+ }
+ panic("Group: bad kind")
+}
+
+func (v Value) group() []Attr {
+ return unsafe.Slice((*Attr)(v.any.(groupptr)), v.num)
+}
+
+//////////////// Other
+
+// Equal reports whether v and w represent the same Go value.
+func (v Value) Equal(w Value) bool {
+ k1 := v.Kind()
+ k2 := w.Kind()
+ if k1 != k2 {
+ return false
+ }
+ switch k1 {
+ case KindInt64, KindUint64, KindBool, KindDuration:
+ return v.num == w.num
+ case KindString:
+ return v.str() == w.str()
+ case KindFloat64:
+ return v.float() == w.float()
+ case KindTime:
+ return v.time().Equal(w.time())
+ case KindAny, KindLogValuer:
+ return v.any == w.any // may panic if non-comparable
+ case KindGroup:
+ return slices.EqualFunc(v.group(), w.group(), Attr.Equal)
+ default:
+ panic(fmt.Sprintf("bad kind: %s", k1))
+ }
+}
+
+// append appends a text representation of v to dst.
+// v is formatted as with fmt.Sprint.
+func (v Value) append(dst []byte) []byte {
+ switch v.Kind() {
+ case KindString:
+ return append(dst, v.str()...)
+ case KindInt64:
+ return strconv.AppendInt(dst, int64(v.num), 10)
+ case KindUint64:
+ return strconv.AppendUint(dst, v.num, 10)
+ case KindFloat64:
+ return strconv.AppendFloat(dst, v.float(), 'g', -1, 64)
+ case KindBool:
+ return strconv.AppendBool(dst, v.bool())
+ case KindDuration:
+ return append(dst, v.duration().String()...)
+ case KindTime:
+ return append(dst, v.time().String()...)
+ case KindGroup:
+ return fmt.Append(dst, v.group())
+ case KindAny, KindLogValuer:
+ return fmt.Append(dst, v.any)
+ default:
+ panic(fmt.Sprintf("bad kind: %s", v.Kind()))
+ }
+}
+
+// A LogValuer is any Go value that can convert itself into a Value for logging.
+//
+// This mechanism may be used to defer expensive operations until they are
+// needed, or to expand a single value into a sequence of components.
+type LogValuer interface {
+ LogValue() Value
+}
+
+const maxLogValues = 100
+
+// Resolve repeatedly calls LogValue on v while it implements LogValuer,
+// and returns the result.
+// If v resolves to a group, the group's attributes' values are not recursively
+// resolved.
+// If the number of LogValue calls exceeds a threshold, a Value containing an
+// error is returned.
+// Resolve's return value is guaranteed not to be of Kind KindLogValuer.
+func (v Value) Resolve() (rv Value) {
+ orig := v
+ defer func() {
+ if r := recover(); r != nil {
+ rv = AnyValue(fmt.Errorf("LogValue panicked\n%s", stack(3, 5)))
+ }
+ }()
+
+ for i := 0; i < maxLogValues; i++ {
+ if v.Kind() != KindLogValuer {
+ return v
+ }
+ v = v.LogValuer().LogValue()
+ }
+ err := fmt.Errorf("LogValue called too many times on Value of type %T", orig.Any())
+ return AnyValue(err)
+}
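+
+// Illustrative LogValuer (editor's sketch, not part of the vendored source):
+//
+//	type token string
+//
+//	func (token) LogValue() Value { return StringValue("REDACTED") }
+//
+// AnyValue(token("secret")).Resolve() then yields the string Value "REDACTED"
+// rather than the secret itself.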
+
+func stack(skip, nFrames int) string {
+ pcs := make([]uintptr, nFrames+1)
+ n := runtime.Callers(skip+1, pcs)
+ if n == 0 {
+ return "(no stack)"
+ }
+ frames := runtime.CallersFrames(pcs[:n])
+ var b strings.Builder
+ i := 0
+ for {
+ frame, more := frames.Next()
+ fmt.Fprintf(&b, "called from %s (%s:%d)\n", frame.Function, frame.File, frame.Line)
+ if !more {
+ break
+ }
+ i++
+ if i >= nFrames {
+ fmt.Fprintf(&b, "(rest of stack elided)\n")
+ break
+ }
+ }
+ return b.String()
+}
diff --git a/vendor/golang.org/x/exp/slog/value_119.go b/vendor/golang.org/x/exp/slog/value_119.go
new file mode 100644
index 0000000..29b0d73
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/value_119.go
@@ -0,0 +1,53 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19 && !go1.20
+
+package slog
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+type (
+ stringptr unsafe.Pointer // used in Value.any when the Value is a string
+ groupptr unsafe.Pointer // used in Value.any when the Value is a []Attr
+)
+
+// StringValue returns a new Value for a string.
+func StringValue(value string) Value {
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&value))
+ return Value{num: uint64(hdr.Len), any: stringptr(hdr.Data)}
+}
+
+func (v Value) str() string {
+ var s string
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ hdr.Data = uintptr(v.any.(stringptr))
+ hdr.Len = int(v.num)
+ return s
+}
+
+// String returns Value's value as a string, formatted like fmt.Sprint. Unlike
+// the methods Int64, Float64, and so on, which panic if v is of the
+// wrong kind, String never panics.
+func (v Value) String() string {
+ if sp, ok := v.any.(stringptr); ok {
+ // Inlining this code makes a huge difference.
+ var s string
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ hdr.Data = uintptr(sp)
+ hdr.Len = int(v.num)
+ return s
+ }
+ return string(v.append(nil))
+}
+
+// GroupValue returns a new Value for a list of Attrs.
+// The caller must not subsequently mutate the argument slice.
+func GroupValue(as ...Attr) Value {
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&as))
+ return Value{num: uint64(hdr.Len), any: groupptr(hdr.Data)}
+}
diff --git a/vendor/golang.org/x/exp/slog/value_120.go b/vendor/golang.org/x/exp/slog/value_120.go
new file mode 100644
index 0000000..f7d4c09
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/value_120.go
@@ -0,0 +1,39 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+
+package slog
+
+import "unsafe"
+
+type (
+ stringptr *byte // used in Value.any when the Value is a string
+ groupptr *Attr // used in Value.any when the Value is a []Attr
+)
+
+// StringValue returns a new Value for a string.
+func StringValue(value string) Value {
+ return Value{num: uint64(len(value)), any: stringptr(unsafe.StringData(value))}
+}
+
+// GroupValue returns a new Value for a list of Attrs.
+// The caller must not subsequently mutate the argument slice.
+func GroupValue(as ...Attr) Value {
+ return Value{num: uint64(len(as)), any: groupptr(unsafe.SliceData(as))}
+}
+
+// String returns Value's value as a string, formatted like fmt.Sprint. Unlike
+// the methods Int64, Float64, and so on, which panic if v is of the
+// wrong kind, String never panics.
+func (v Value) String() string {
+ if sp, ok := v.any.(stringptr); ok {
+ return unsafe.String(sp, v.num)
+ }
+ return string(v.append(nil))
+}
+
+func (v Value) str() string {
+ return unsafe.String(v.any.(stringptr), v.num)
+}
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 0000000..2a7cf70
--- /dev/null
+++ b/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go
new file mode 100644
index 0000000..cd0a8ac
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/atom.go
@@ -0,0 +1,78 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package atom provides integer codes (also known as atoms) for a fixed set of
+// frequently occurring HTML strings: tag names and attribute keys such as "p"
+// and "id".
+//
+// Sharing an atom's name between all elements with the same tag can result in
+// fewer string allocations when tokenizing and parsing HTML. Integer
+// comparisons are also generally faster than string comparisons.
+//
+// The value of an atom's particular code is not guaranteed to stay the same
+// between versions of this package. Neither is any ordering guaranteed:
+// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
+// be dense. The only guarantees are that e.g. looking up "div" will yield
+// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
+package atom // import "golang.org/x/net/html/atom"
+
+// Atom is an integer code for a string. The zero value maps to "".
+type Atom uint32
+
+// String returns the atom's name.
+func (a Atom) String() string {
+ start := uint32(a >> 8)
+ n := uint32(a & 0xff)
+ if start+n > uint32(len(atomText)) {
+ return ""
+ }
+ return atomText[start : start+n]
+}
+
+func (a Atom) string() string {
+ return atomText[a>>8 : a>>8+a&0xff]
+}
+
+// fnv computes the FNV hash with an arbitrary starting value h.
+func fnv(h uint32, s []byte) uint32 {
+ for i := range s {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+func match(s string, t []byte) bool {
+ for i, c := range t {
+ if s[i] != c {
+ return false
+ }
+ }
+ return true
+}
+
+// Lookup returns the atom whose name is s. It returns zero if there is no
+// such atom. The lookup is case sensitive.
+func Lookup(s []byte) Atom {
+ if len(s) == 0 || len(s) > maxAtomLen {
+ return 0
+ }
+ h := fnv(hash0, s)
+ if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
+ return a
+ }
+ if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
+ return a
+ }
+ return 0
+}
+
+// String returns a string whose contents are equal to s. In that sense, it is
+// equivalent to string(s) but may be more efficient.
+func String(s []byte) string {
+ if a := Lookup(s); a != 0 {
+ return a.String()
+ }
+ return string(s)
+}
diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go
new file mode 100644
index 0000000..2a93886
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/table.go
@@ -0,0 +1,783 @@
+// Code generated by go generate gen.go; DO NOT EDIT.
+
+//go:generate go run gen.go
+
+package atom
+
+const (
+ A Atom = 0x1
+ Abbr Atom = 0x4
+ Accept Atom = 0x1a06
+ AcceptCharset Atom = 0x1a0e
+ Accesskey Atom = 0x2c09
+ Acronym Atom = 0xaa07
+ Action Atom = 0x27206
+ Address Atom = 0x6f307
+ Align Atom = 0xb105
+ Allowfullscreen Atom = 0x2080f
+ Allowpaymentrequest Atom = 0xc113
+ Allowusermedia Atom = 0xdd0e
+ Alt Atom = 0xf303
+ Annotation Atom = 0x1c90a
+ AnnotationXml Atom = 0x1c90e
+ Applet Atom = 0x31906
+ Area Atom = 0x35604
+ Article Atom = 0x3fc07
+ As Atom = 0x3c02
+ Aside Atom = 0x10705
+ Async Atom = 0xff05
+ Audio Atom = 0x11505
+ Autocomplete Atom = 0x2780c
+ Autofocus Atom = 0x12109
+ Autoplay Atom = 0x13c08
+ B Atom = 0x101
+ Base Atom = 0x3b04
+ Basefont Atom = 0x3b08
+ Bdi Atom = 0xba03
+ Bdo Atom = 0x14b03
+ Bgsound Atom = 0x15e07
+ Big Atom = 0x17003
+ Blink Atom = 0x17305
+ Blockquote Atom = 0x1870a
+ Body Atom = 0x2804
+ Br Atom = 0x202
+ Button Atom = 0x19106
+ Canvas Atom = 0x10306
+ Caption Atom = 0x23107
+ Center Atom = 0x22006
+ Challenge Atom = 0x29b09
+ Charset Atom = 0x2107
+ Checked Atom = 0x47907
+ Cite Atom = 0x19c04
+ Class Atom = 0x56405
+ Code Atom = 0x5c504
+ Col Atom = 0x1ab03
+ Colgroup Atom = 0x1ab08
+ Color Atom = 0x1bf05
+ Cols Atom = 0x1c404
+ Colspan Atom = 0x1c407
+ Command Atom = 0x1d707
+ Content Atom = 0x58b07
+ Contenteditable Atom = 0x58b0f
+ Contextmenu Atom = 0x3800b
+ Controls Atom = 0x1de08
+ Coords Atom = 0x1ea06
+ Crossorigin Atom = 0x1fb0b
+ Data Atom = 0x4a504
+ Datalist Atom = 0x4a508
+ Datetime Atom = 0x2b808
+ Dd Atom = 0x2d702
+ Default Atom = 0x10a07
+ Defer Atom = 0x5c705
+ Del Atom = 0x45203
+ Desc Atom = 0x56104
+ Details Atom = 0x7207
+ Dfn Atom = 0x8703
+ Dialog Atom = 0xbb06
+ Dir Atom = 0x9303
+ Dirname Atom = 0x9307
+ Disabled Atom = 0x16408
+ Div Atom = 0x16b03
+ Dl Atom = 0x5e602
+ Download Atom = 0x46308
+ Draggable Atom = 0x17a09
+ Dropzone Atom = 0x40508
+ Dt Atom = 0x64b02
+ Em Atom = 0x6e02
+ Embed Atom = 0x6e05
+ Enctype Atom = 0x28d07
+ Face Atom = 0x21e04
+ Fieldset Atom = 0x22608
+ Figcaption Atom = 0x22e0a
+ Figure Atom = 0x24806
+ Font Atom = 0x3f04
+ Footer Atom = 0xf606
+ For Atom = 0x25403
+ ForeignObject Atom = 0x2540d
+ Foreignobject Atom = 0x2610d
+ Form Atom = 0x26e04
+ Formaction Atom = 0x26e0a
+ Formenctype Atom = 0x2890b
+ Formmethod Atom = 0x2a40a
+ Formnovalidate Atom = 0x2ae0e
+ Formtarget Atom = 0x2c00a
+ Frame Atom = 0x8b05
+ Frameset Atom = 0x8b08
+ H1 Atom = 0x15c02
+ H2 Atom = 0x2de02
+ H3 Atom = 0x30d02
+ H4 Atom = 0x34502
+ H5 Atom = 0x34f02
+ H6 Atom = 0x64d02
+ Head Atom = 0x33104
+ Header Atom = 0x33106
+ Headers Atom = 0x33107
+ Height Atom = 0x5206
+ Hgroup Atom = 0x2ca06
+ Hidden Atom = 0x2d506
+ High Atom = 0x2db04
+ Hr Atom = 0x15702
+ Href Atom = 0x2e004
+ Hreflang Atom = 0x2e008
+ Html Atom = 0x5604
+ HttpEquiv Atom = 0x2e80a
+ I Atom = 0x601
+ Icon Atom = 0x58a04
+ Id Atom = 0x10902
+ Iframe Atom = 0x2fc06
+ Image Atom = 0x30205
+ Img Atom = 0x30703
+ Input Atom = 0x44b05
+ Inputmode Atom = 0x44b09
+ Ins Atom = 0x20403
+ Integrity Atom = 0x23f09
+ Is Atom = 0x16502
+ Isindex Atom = 0x30f07
+ Ismap Atom = 0x31605
+ Itemid Atom = 0x38b06
+ Itemprop Atom = 0x19d08
+ Itemref Atom = 0x3cd07
+ Itemscope Atom = 0x67109
+ Itemtype Atom = 0x31f08
+ Kbd Atom = 0xb903
+ Keygen Atom = 0x3206
+ Keytype Atom = 0xd607
+ Kind Atom = 0x17704
+ Label Atom = 0x5905
+ Lang Atom = 0x2e404
+ Legend Atom = 0x18106
+ Li Atom = 0xb202
+ Link Atom = 0x17404
+ List Atom = 0x4a904
+ Listing Atom = 0x4a907
+ Loop Atom = 0x5d04
+ Low Atom = 0xc303
+ Main Atom = 0x1004
+ Malignmark Atom = 0xb00a
+ Manifest Atom = 0x6d708
+ Map Atom = 0x31803
+ Mark Atom = 0xb604
+ Marquee Atom = 0x32707
+ Math Atom = 0x32e04
+ Max Atom = 0x33d03
+ Maxlength Atom = 0x33d09
+ Media Atom = 0xe605
+ Mediagroup Atom = 0xe60a
+ Menu Atom = 0x38704
+ Menuitem Atom = 0x38708
+ Meta Atom = 0x4b804
+ Meter Atom = 0x9805
+ Method Atom = 0x2a806
+ Mglyph Atom = 0x30806
+ Mi Atom = 0x34702
+ Min Atom = 0x34703
+ Minlength Atom = 0x34709
+ Mn Atom = 0x2b102
+ Mo Atom = 0xa402
+ Ms Atom = 0x67402
+ Mtext Atom = 0x35105
+ Multiple Atom = 0x35f08
+ Muted Atom = 0x36705
+ Name Atom = 0x9604
+ Nav Atom = 0x1303
+ Nobr Atom = 0x3704
+ Noembed Atom = 0x6c07
+ Noframes Atom = 0x8908
+ Nomodule Atom = 0xa208
+ Nonce Atom = 0x1a605
+ Noscript Atom = 0x21608
+ Novalidate Atom = 0x2b20a
+ Object Atom = 0x26806
+ Ol Atom = 0x13702
+ Onabort Atom = 0x19507
+ Onafterprint Atom = 0x2360c
+ Onautocomplete Atom = 0x2760e
+ Onautocompleteerror Atom = 0x27613
+ Onauxclick Atom = 0x61f0a
+ Onbeforeprint Atom = 0x69e0d
+ Onbeforeunload Atom = 0x6e70e
+ Onblur Atom = 0x56d06
+ Oncancel Atom = 0x11908
+ Oncanplay Atom = 0x14d09
+ Oncanplaythrough Atom = 0x14d10
+ Onchange Atom = 0x41b08
+ Onclick Atom = 0x2f507
+ Onclose Atom = 0x36c07
+ Oncontextmenu Atom = 0x37e0d
+ Oncopy Atom = 0x39106
+ Oncuechange Atom = 0x3970b
+ Oncut Atom = 0x3a205
+ Ondblclick Atom = 0x3a70a
+ Ondrag Atom = 0x3b106
+ Ondragend Atom = 0x3b109
+ Ondragenter Atom = 0x3ba0b
+ Ondragexit Atom = 0x3c50a
+ Ondragleave Atom = 0x3df0b
+ Ondragover Atom = 0x3ea0a
+ Ondragstart Atom = 0x3f40b
+ Ondrop Atom = 0x40306
+ Ondurationchange Atom = 0x41310
+ Onemptied Atom = 0x40a09
+ Onended Atom = 0x42307
+ Onerror Atom = 0x42a07
+ Onfocus Atom = 0x43107
+ Onhashchange Atom = 0x43d0c
+ Oninput Atom = 0x44907
+ Oninvalid Atom = 0x45509
+ Onkeydown Atom = 0x45e09
+ Onkeypress Atom = 0x46b0a
+ Onkeyup Atom = 0x48007
+ Onlanguagechange Atom = 0x48d10
+ Onload Atom = 0x49d06
+ Onloadeddata Atom = 0x49d0c
+ Onloadedmetadata Atom = 0x4b010
+ Onloadend Atom = 0x4c609
+ Onloadstart Atom = 0x4cf0b
+ Onmessage Atom = 0x4da09
+ Onmessageerror Atom = 0x4da0e
+ Onmousedown Atom = 0x4e80b
+ Onmouseenter Atom = 0x4f30c
+ Onmouseleave Atom = 0x4ff0c
+ Onmousemove Atom = 0x50b0b
+ Onmouseout Atom = 0x5160a
+ Onmouseover Atom = 0x5230b
+ Onmouseup Atom = 0x52e09
+ Onmousewheel Atom = 0x53c0c
+ Onoffline Atom = 0x54809
+ Ononline Atom = 0x55108
+ Onpagehide Atom = 0x5590a
+ Onpageshow Atom = 0x5730a
+ Onpaste Atom = 0x57f07
+ Onpause Atom = 0x59a07
+ Onplay Atom = 0x5a406
+ Onplaying Atom = 0x5a409
+ Onpopstate Atom = 0x5ad0a
+ Onprogress Atom = 0x5b70a
+ Onratechange Atom = 0x5cc0c
+ Onrejectionhandled Atom = 0x5d812
+ Onreset Atom = 0x5ea07
+ Onresize Atom = 0x5f108
+ Onscroll Atom = 0x60008
+ Onsecuritypolicyviolation Atom = 0x60819
+ Onseeked Atom = 0x62908
+ Onseeking Atom = 0x63109
+ Onselect Atom = 0x63a08
+ Onshow Atom = 0x64406
+ Onsort Atom = 0x64f06
+ Onstalled Atom = 0x65909
+ Onstorage Atom = 0x66209
+ Onsubmit Atom = 0x66b08
+ Onsuspend Atom = 0x67b09
+ Ontimeupdate Atom = 0x400c
+ Ontoggle Atom = 0x68408
+ Onunhandledrejection Atom = 0x68c14
+ Onunload Atom = 0x6ab08
+ Onvolumechange Atom = 0x6b30e
+ Onwaiting Atom = 0x6c109
+ Onwheel Atom = 0x6ca07
+ Open Atom = 0x1a304
+ Optgroup Atom = 0x5f08
+ Optimum Atom = 0x6d107
+ Option Atom = 0x6e306
+ Output Atom = 0x51d06
+ P Atom = 0xc01
+ Param Atom = 0xc05
+ Pattern Atom = 0x6607
+ Picture Atom = 0x7b07
+ Ping Atom = 0xef04
+ Placeholder Atom = 0x1310b
+ Plaintext Atom = 0x1b209
+ Playsinline Atom = 0x1400b
+ Poster Atom = 0x2cf06
+ Pre Atom = 0x47003
+ Preload Atom = 0x48607
+ Progress Atom = 0x5b908
+ Prompt Atom = 0x53606
+ Public Atom = 0x58606
+ Q Atom = 0xcf01
+ Radiogroup Atom = 0x30a
+ Rb Atom = 0x3a02
+ Readonly Atom = 0x35708
+ Referrerpolicy Atom = 0x3d10e
+ Rel Atom = 0x48703
+ Required Atom = 0x24c08
+ Reversed Atom = 0x8008
+ Rows Atom = 0x9c04
+ Rowspan Atom = 0x9c07
+ Rp Atom = 0x23c02
+ Rt Atom = 0x19a02
+ Rtc Atom = 0x19a03
+ Ruby Atom = 0xfb04
+ S Atom = 0x2501
+ Samp Atom = 0x7804
+ Sandbox Atom = 0x12907
+ Scope Atom = 0x67505
+ Scoped Atom = 0x67506
+ Script Atom = 0x21806
+ Seamless Atom = 0x37108
+ Section Atom = 0x56807
+ Select Atom = 0x63c06
+ Selected Atom = 0x63c08
+ Shape Atom = 0x1e505
+ Size Atom = 0x5f504
+ Sizes Atom = 0x5f505
+ Slot Atom = 0x1ef04
+ Small Atom = 0x20605
+ Sortable Atom = 0x65108
+ Sorted Atom = 0x33706
+ Source Atom = 0x37806
+ Spacer Atom = 0x43706
+ Span Atom = 0x9f04
+ Spellcheck Atom = 0x4740a
+ Src Atom = 0x5c003
+ Srcdoc Atom = 0x5c006
+ Srclang Atom = 0x5f907
+ Srcset Atom = 0x6f906
+ Start Atom = 0x3fa05
+ Step Atom = 0x58304
+ Strike Atom = 0xd206
+ Strong Atom = 0x6dd06
+ Style Atom = 0x6ff05
+ Sub Atom = 0x66d03
+ Summary Atom = 0x70407
+ Sup Atom = 0x70b03
+ Svg Atom = 0x70e03
+ System Atom = 0x71106
+ Tabindex Atom = 0x4be08
+ Table Atom = 0x59505
+ Target Atom = 0x2c406
+ Tbody Atom = 0x2705
+ Td Atom = 0x9202
+ Template Atom = 0x71408
+ Textarea Atom = 0x35208
+ Tfoot Atom = 0xf505
+ Th Atom = 0x15602
+ Thead Atom = 0x33005
+ Time Atom = 0x4204
+ Title Atom = 0x11005
+ Tr Atom = 0xcc02
+ Track Atom = 0x1ba05
+ Translate Atom = 0x1f209
+ Tt Atom = 0x6802
+ Type Atom = 0xd904
+ Typemustmatch Atom = 0x2900d
+ U Atom = 0xb01
+ Ul Atom = 0xa702
+ Updateviacache Atom = 0x460e
+ Usemap Atom = 0x59e06
+ Value Atom = 0x1505
+ Var Atom = 0x16d03
+ Video Atom = 0x2f105
+ Wbr Atom = 0x57c03
+ Width Atom = 0x64905
+ Workertype Atom = 0x71c0a
+ Wrap Atom = 0x72604
+ Xmp Atom = 0x12f03
+)
+
+const hash0 = 0x81cdf10e
+
+const maxAtomLen = 25
+
+var table = [1 << 9]Atom{
+ 0x1: 0xe60a, // mediagroup
+ 0x2: 0x2e404, // lang
+ 0x4: 0x2c09, // accesskey
+ 0x5: 0x8b08, // frameset
+ 0x7: 0x63a08, // onselect
+ 0x8: 0x71106, // system
+ 0xa: 0x64905, // width
+ 0xc: 0x2890b, // formenctype
+ 0xd: 0x13702, // ol
+ 0xe: 0x3970b, // oncuechange
+ 0x10: 0x14b03, // bdo
+ 0x11: 0x11505, // audio
+ 0x12: 0x17a09, // draggable
+ 0x14: 0x2f105, // video
+ 0x15: 0x2b102, // mn
+ 0x16: 0x38704, // menu
+ 0x17: 0x2cf06, // poster
+ 0x19: 0xf606, // footer
+ 0x1a: 0x2a806, // method
+ 0x1b: 0x2b808, // datetime
+ 0x1c: 0x19507, // onabort
+ 0x1d: 0x460e, // updateviacache
+ 0x1e: 0xff05, // async
+ 0x1f: 0x49d06, // onload
+ 0x21: 0x11908, // oncancel
+ 0x22: 0x62908, // onseeked
+ 0x23: 0x30205, // image
+ 0x24: 0x5d812, // onrejectionhandled
+ 0x26: 0x17404, // link
+ 0x27: 0x51d06, // output
+ 0x28: 0x33104, // head
+ 0x29: 0x4ff0c, // onmouseleave
+ 0x2a: 0x57f07, // onpaste
+ 0x2b: 0x5a409, // onplaying
+ 0x2c: 0x1c407, // colspan
+ 0x2f: 0x1bf05, // color
+ 0x30: 0x5f504, // size
+ 0x31: 0x2e80a, // http-equiv
+ 0x33: 0x601, // i
+ 0x34: 0x5590a, // onpagehide
+ 0x35: 0x68c14, // onunhandledrejection
+ 0x37: 0x42a07, // onerror
+ 0x3a: 0x3b08, // basefont
+ 0x3f: 0x1303, // nav
+ 0x40: 0x17704, // kind
+ 0x41: 0x35708, // readonly
+ 0x42: 0x30806, // mglyph
+ 0x44: 0xb202, // li
+ 0x46: 0x2d506, // hidden
+ 0x47: 0x70e03, // svg
+ 0x48: 0x58304, // step
+ 0x49: 0x23f09, // integrity
+ 0x4a: 0x58606, // public
+ 0x4c: 0x1ab03, // col
+ 0x4d: 0x1870a, // blockquote
+ 0x4e: 0x34f02, // h5
+ 0x50: 0x5b908, // progress
+ 0x51: 0x5f505, // sizes
+ 0x52: 0x34502, // h4
+ 0x56: 0x33005, // thead
+ 0x57: 0xd607, // keytype
+ 0x58: 0x5b70a, // onprogress
+ 0x59: 0x44b09, // inputmode
+ 0x5a: 0x3b109, // ondragend
+ 0x5d: 0x3a205, // oncut
+ 0x5e: 0x43706, // spacer
+ 0x5f: 0x1ab08, // colgroup
+ 0x62: 0x16502, // is
+ 0x65: 0x3c02, // as
+ 0x66: 0x54809, // onoffline
+ 0x67: 0x33706, // sorted
+ 0x69: 0x48d10, // onlanguagechange
+ 0x6c: 0x43d0c, // onhashchange
+ 0x6d: 0x9604, // name
+ 0x6e: 0xf505, // tfoot
+ 0x6f: 0x56104, // desc
+ 0x70: 0x33d03, // max
+ 0x72: 0x1ea06, // coords
+ 0x73: 0x30d02, // h3
+ 0x74: 0x6e70e, // onbeforeunload
+ 0x75: 0x9c04, // rows
+ 0x76: 0x63c06, // select
+ 0x77: 0x9805, // meter
+ 0x78: 0x38b06, // itemid
+ 0x79: 0x53c0c, // onmousewheel
+ 0x7a: 0x5c006, // srcdoc
+ 0x7d: 0x1ba05, // track
+ 0x7f: 0x31f08, // itemtype
+ 0x82: 0xa402, // mo
+ 0x83: 0x41b08, // onchange
+ 0x84: 0x33107, // headers
+ 0x85: 0x5cc0c, // onratechange
+ 0x86: 0x60819, // onsecuritypolicyviolation
+ 0x88: 0x4a508, // datalist
+ 0x89: 0x4e80b, // onmousedown
+ 0x8a: 0x1ef04, // slot
+ 0x8b: 0x4b010, // onloadedmetadata
+ 0x8c: 0x1a06, // accept
+ 0x8d: 0x26806, // object
+ 0x91: 0x6b30e, // onvolumechange
+ 0x92: 0x2107, // charset
+ 0x93: 0x27613, // onautocompleteerror
+ 0x94: 0xc113, // allowpaymentrequest
+ 0x95: 0x2804, // body
+ 0x96: 0x10a07, // default
+ 0x97: 0x63c08, // selected
+ 0x98: 0x21e04, // face
+ 0x99: 0x1e505, // shape
+ 0x9b: 0x68408, // ontoggle
+ 0x9e: 0x64b02, // dt
+ 0x9f: 0xb604, // mark
+ 0xa1: 0xb01, // u
+ 0xa4: 0x6ab08, // onunload
+ 0xa5: 0x5d04, // loop
+ 0xa6: 0x16408, // disabled
+ 0xaa: 0x42307, // onended
+ 0xab: 0xb00a, // malignmark
+ 0xad: 0x67b09, // onsuspend
+ 0xae: 0x35105, // mtext
+ 0xaf: 0x64f06, // onsort
+ 0xb0: 0x19d08, // itemprop
+ 0xb3: 0x67109, // itemscope
+ 0xb4: 0x17305, // blink
+ 0xb6: 0x3b106, // ondrag
+ 0xb7: 0xa702, // ul
+ 0xb8: 0x26e04, // form
+ 0xb9: 0x12907, // sandbox
+ 0xba: 0x8b05, // frame
+ 0xbb: 0x1505, // value
+ 0xbc: 0x66209, // onstorage
+ 0xbf: 0xaa07, // acronym
+ 0xc0: 0x19a02, // rt
+ 0xc2: 0x202, // br
+ 0xc3: 0x22608, // fieldset
+ 0xc4: 0x2900d, // typemustmatch
+ 0xc5: 0xa208, // nomodule
+ 0xc6: 0x6c07, // noembed
+ 0xc7: 0x69e0d, // onbeforeprint
+ 0xc8: 0x19106, // button
+ 0xc9: 0x2f507, // onclick
+ 0xca: 0x70407, // summary
+ 0xcd: 0xfb04, // ruby
+ 0xce: 0x56405, // class
+ 0xcf: 0x3f40b, // ondragstart
+ 0xd0: 0x23107, // caption
+ 0xd4: 0xdd0e, // allowusermedia
+ 0xd5: 0x4cf0b, // onloadstart
+ 0xd9: 0x16b03, // div
+ 0xda: 0x4a904, // list
+ 0xdb: 0x32e04, // math
+ 0xdc: 0x44b05, // input
+ 0xdf: 0x3ea0a, // ondragover
+ 0xe0: 0x2de02, // h2
+ 0xe2: 0x1b209, // plaintext
+ 0xe4: 0x4f30c, // onmouseenter
+ 0xe7: 0x47907, // checked
+ 0xe8: 0x47003, // pre
+ 0xea: 0x35f08, // multiple
+ 0xeb: 0xba03, // bdi
+ 0xec: 0x33d09, // maxlength
+ 0xed: 0xcf01, // q
+ 0xee: 0x61f0a, // onauxclick
+ 0xf0: 0x57c03, // wbr
+ 0xf2: 0x3b04, // base
+ 0xf3: 0x6e306, // option
+ 0xf5: 0x41310, // ondurationchange
+ 0xf7: 0x8908, // noframes
+ 0xf9: 0x40508, // dropzone
+ 0xfb: 0x67505, // scope
+ 0xfc: 0x8008, // reversed
+ 0xfd: 0x3ba0b, // ondragenter
+ 0xfe: 0x3fa05, // start
+ 0xff: 0x12f03, // xmp
+ 0x100: 0x5f907, // srclang
+ 0x101: 0x30703, // img
+ 0x104: 0x101, // b
+ 0x105: 0x25403, // for
+ 0x106: 0x10705, // aside
+ 0x107: 0x44907, // oninput
+ 0x108: 0x35604, // area
+ 0x109: 0x2a40a, // formmethod
+ 0x10a: 0x72604, // wrap
+ 0x10c: 0x23c02, // rp
+ 0x10d: 0x46b0a, // onkeypress
+ 0x10e: 0x6802, // tt
+ 0x110: 0x34702, // mi
+ 0x111: 0x36705, // muted
+ 0x112: 0xf303, // alt
+ 0x113: 0x5c504, // code
+ 0x114: 0x6e02, // em
+ 0x115: 0x3c50a, // ondragexit
+ 0x117: 0x9f04, // span
+ 0x119: 0x6d708, // manifest
+ 0x11a: 0x38708, // menuitem
+ 0x11b: 0x58b07, // content
+ 0x11d: 0x6c109, // onwaiting
+ 0x11f: 0x4c609, // onloadend
+ 0x121: 0x37e0d, // oncontextmenu
+ 0x123: 0x56d06, // onblur
+ 0x124: 0x3fc07, // article
+ 0x125: 0x9303, // dir
+ 0x126: 0xef04, // ping
+ 0x127: 0x24c08, // required
+ 0x128: 0x45509, // oninvalid
+ 0x129: 0xb105, // align
+ 0x12b: 0x58a04, // icon
+ 0x12c: 0x64d02, // h6
+ 0x12d: 0x1c404, // cols
+ 0x12e: 0x22e0a, // figcaption
+ 0x12f: 0x45e09, // onkeydown
+ 0x130: 0x66b08, // onsubmit
+ 0x131: 0x14d09, // oncanplay
+ 0x132: 0x70b03, // sup
+ 0x133: 0xc01, // p
+ 0x135: 0x40a09, // onemptied
+ 0x136: 0x39106, // oncopy
+ 0x137: 0x19c04, // cite
+ 0x138: 0x3a70a, // ondblclick
+ 0x13a: 0x50b0b, // onmousemove
+ 0x13c: 0x66d03, // sub
+ 0x13d: 0x48703, // rel
+ 0x13e: 0x5f08, // optgroup
+ 0x142: 0x9c07, // rowspan
+ 0x143: 0x37806, // source
+ 0x144: 0x21608, // noscript
+ 0x145: 0x1a304, // open
+ 0x146: 0x20403, // ins
+ 0x147: 0x2540d, // foreignObject
+ 0x148: 0x5ad0a, // onpopstate
+ 0x14a: 0x28d07, // enctype
+ 0x14b: 0x2760e, // onautocomplete
+ 0x14c: 0x35208, // textarea
+ 0x14e: 0x2780c, // autocomplete
+ 0x14f: 0x15702, // hr
+ 0x150: 0x1de08, // controls
+ 0x151: 0x10902, // id
+ 0x153: 0x2360c, // onafterprint
+ 0x155: 0x2610d, // foreignobject
+ 0x156: 0x32707, // marquee
+ 0x157: 0x59a07, // onpause
+ 0x158: 0x5e602, // dl
+ 0x159: 0x5206, // height
+ 0x15a: 0x34703, // min
+ 0x15b: 0x9307, // dirname
+ 0x15c: 0x1f209, // translate
+ 0x15d: 0x5604, // html
+ 0x15e: 0x34709, // minlength
+ 0x15f: 0x48607, // preload
+ 0x160: 0x71408, // template
+ 0x161: 0x3df0b, // ondragleave
+ 0x162: 0x3a02, // rb
+ 0x164: 0x5c003, // src
+ 0x165: 0x6dd06, // strong
+ 0x167: 0x7804, // samp
+ 0x168: 0x6f307, // address
+ 0x169: 0x55108, // ononline
+ 0x16b: 0x1310b, // placeholder
+ 0x16c: 0x2c406, // target
+ 0x16d: 0x20605, // small
+ 0x16e: 0x6ca07, // onwheel
+ 0x16f: 0x1c90a, // annotation
+ 0x170: 0x4740a, // spellcheck
+ 0x171: 0x7207, // details
+ 0x172: 0x10306, // canvas
+ 0x173: 0x12109, // autofocus
+ 0x174: 0xc05, // param
+ 0x176: 0x46308, // download
+ 0x177: 0x45203, // del
+ 0x178: 0x36c07, // onclose
+ 0x179: 0xb903, // kbd
+ 0x17a: 0x31906, // applet
+ 0x17b: 0x2e004, // href
+ 0x17c: 0x5f108, // onresize
+ 0x17e: 0x49d0c, // onloadeddata
+ 0x180: 0xcc02, // tr
+ 0x181: 0x2c00a, // formtarget
+ 0x182: 0x11005, // title
+ 0x183: 0x6ff05, // style
+ 0x184: 0xd206, // strike
+ 0x185: 0x59e06, // usemap
+ 0x186: 0x2fc06, // iframe
+ 0x187: 0x1004, // main
+ 0x189: 0x7b07, // picture
+ 0x18c: 0x31605, // ismap
+ 0x18e: 0x4a504, // data
+ 0x18f: 0x5905, // label
+ 0x191: 0x3d10e, // referrerpolicy
+ 0x192: 0x15602, // th
+ 0x194: 0x53606, // prompt
+ 0x195: 0x56807, // section
+ 0x197: 0x6d107, // optimum
+ 0x198: 0x2db04, // high
+ 0x199: 0x15c02, // h1
+ 0x19a: 0x65909, // onstalled
+ 0x19b: 0x16d03, // var
+ 0x19c: 0x4204, // time
+ 0x19e: 0x67402, // ms
+ 0x19f: 0x33106, // header
+ 0x1a0: 0x4da09, // onmessage
+ 0x1a1: 0x1a605, // nonce
+ 0x1a2: 0x26e0a, // formaction
+ 0x1a3: 0x22006, // center
+ 0x1a4: 0x3704, // nobr
+ 0x1a5: 0x59505, // table
+ 0x1a6: 0x4a907, // listing
+ 0x1a7: 0x18106, // legend
+ 0x1a9: 0x29b09, // challenge
+ 0x1aa: 0x24806, // figure
+ 0x1ab: 0xe605, // media
+ 0x1ae: 0xd904, // type
+ 0x1af: 0x3f04, // font
+ 0x1b0: 0x4da0e, // onmessageerror
+ 0x1b1: 0x37108, // seamless
+ 0x1b2: 0x8703, // dfn
+ 0x1b3: 0x5c705, // defer
+ 0x1b4: 0xc303, // low
+ 0x1b5: 0x19a03, // rtc
+ 0x1b6: 0x5230b, // onmouseover
+ 0x1b7: 0x2b20a, // novalidate
+ 0x1b8: 0x71c0a, // workertype
+ 0x1ba: 0x3cd07, // itemref
+ 0x1bd: 0x1, // a
+ 0x1be: 0x31803, // map
+ 0x1bf: 0x400c, // ontimeupdate
+ 0x1c0: 0x15e07, // bgsound
+ 0x1c1: 0x3206, // keygen
+ 0x1c2: 0x2705, // tbody
+ 0x1c5: 0x64406, // onshow
+ 0x1c7: 0x2501, // s
+ 0x1c8: 0x6607, // pattern
+ 0x1cc: 0x14d10, // oncanplaythrough
+ 0x1ce: 0x2d702, // dd
+ 0x1cf: 0x6f906, // srcset
+ 0x1d0: 0x17003, // big
+ 0x1d2: 0x65108, // sortable
+ 0x1d3: 0x48007, // onkeyup
+ 0x1d5: 0x5a406, // onplay
+ 0x1d7: 0x4b804, // meta
+ 0x1d8: 0x40306, // ondrop
+ 0x1da: 0x60008, // onscroll
+ 0x1db: 0x1fb0b, // crossorigin
+ 0x1dc: 0x5730a, // onpageshow
+ 0x1dd: 0x4, // abbr
+ 0x1de: 0x9202, // td
+ 0x1df: 0x58b0f, // contenteditable
+ 0x1e0: 0x27206, // action
+ 0x1e1: 0x1400b, // playsinline
+ 0x1e2: 0x43107, // onfocus
+ 0x1e3: 0x2e008, // hreflang
+ 0x1e5: 0x5160a, // onmouseout
+ 0x1e6: 0x5ea07, // onreset
+ 0x1e7: 0x13c08, // autoplay
+ 0x1e8: 0x63109, // onseeking
+ 0x1ea: 0x67506, // scoped
+ 0x1ec: 0x30a, // radiogroup
+ 0x1ee: 0x3800b, // contextmenu
+ 0x1ef: 0x52e09, // onmouseup
+ 0x1f1: 0x2ca06, // hgroup
+ 0x1f2: 0x2080f, // allowfullscreen
+ 0x1f3: 0x4be08, // tabindex
+ 0x1f6: 0x30f07, // isindex
+ 0x1f7: 0x1a0e, // accept-charset
+ 0x1f8: 0x2ae0e, // formnovalidate
+ 0x1fb: 0x1c90e, // annotation-xml
+ 0x1fc: 0x6e05, // embed
+ 0x1fd: 0x21806, // script
+ 0x1fe: 0xbb06, // dialog
+ 0x1ff: 0x1d707, // command
+}
+
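+// Note: each Atom value above packs a slice of atomText below: the high bits
+// are the byte offset and the low eight bits the length (the decoding helper
+// lives elsewhere in this package). For example, Param = 0xc05 selects the
+// five bytes at offset 0xc, i.e. atomText[0xc:0x11] == "param".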
+const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" +
+ "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" +
+ "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" +
+ "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" +
+ "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" +
+ "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" +
+ "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" +
+ "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" +
+ "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" +
+ "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" +
+ "ignObjectforeignobjectformactionautocompleteerrorformenctype" +
+ "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" +
+ "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" +
+ "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" +
+ "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" +
+ "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" +
+ "enterondragexitemreferrerpolicyondragleaveondragoverondragst" +
+ "articleondropzonemptiedondurationchangeonendedonerroronfocus" +
+ "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" +
+ "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" +
+ "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" +
+ "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" +
+ "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" +
+ "classectionbluronpageshowbronpastepublicontenteditableonpaus" +
+ "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" +
+ "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" +
+ "violationauxclickonseekedonseekingonselectedonshowidth6onsor" +
+ "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" +
+ "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" +
+ "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" +
+ "arysupsvgsystemplateworkertypewrap"
diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go
new file mode 100644
index 0000000..ff7acf2
--- /dev/null
+++ b/vendor/golang.org/x/net/html/const.go
@@ -0,0 +1,111 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+// Section 12.2.4.2 of the HTML5 specification says "The following elements
+// have varying levels of special parsing rules".
+// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
+var isSpecialElementMap = map[string]bool{
+ "address": true,
+ "applet": true,
+ "area": true,
+ "article": true,
+ "aside": true,
+ "base": true,
+ "basefont": true,
+ "bgsound": true,
+ "blockquote": true,
+ "body": true,
+ "br": true,
+ "button": true,
+ "caption": true,
+ "center": true,
+ "col": true,
+ "colgroup": true,
+ "dd": true,
+ "details": true,
+ "dir": true,
+ "div": true,
+ "dl": true,
+ "dt": true,
+ "embed": true,
+ "fieldset": true,
+ "figcaption": true,
+ "figure": true,
+ "footer": true,
+ "form": true,
+ "frame": true,
+ "frameset": true,
+ "h1": true,
+ "h2": true,
+ "h3": true,
+ "h4": true,
+ "h5": true,
+ "h6": true,
+ "head": true,
+ "header": true,
+ "hgroup": true,
+ "hr": true,
+ "html": true,
+ "iframe": true,
+ "img": true,
+ "input": true,
+ "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility.
+ "li": true,
+ "link": true,
+ "listing": true,
+ "main": true,
+ "marquee": true,
+ "menu": true,
+ "meta": true,
+ "nav": true,
+ "noembed": true,
+ "noframes": true,
+ "noscript": true,
+ "object": true,
+ "ol": true,
+ "p": true,
+ "param": true,
+ "plaintext": true,
+ "pre": true,
+ "script": true,
+ "section": true,
+ "select": true,
+ "source": true,
+ "style": true,
+ "summary": true,
+ "table": true,
+ "tbody": true,
+ "td": true,
+ "template": true,
+ "textarea": true,
+ "tfoot": true,
+ "th": true,
+ "thead": true,
+ "title": true,
+ "tr": true,
+ "track": true,
+ "ul": true,
+ "wbr": true,
+ "xmp": true,
+}
+
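+// isSpecialElement reports whether element is "special", taking its namespace
+// into account. For example, a <title> element is special in both the HTML
+// and SVG namespaces, while an SVG <circle> is not.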
+func isSpecialElement(element *Node) bool {
+ switch element.Namespace {
+ case "", "html":
+ return isSpecialElementMap[element.Data]
+ case "math":
+ switch element.Data {
+ case "mi", "mo", "mn", "ms", "mtext", "annotation-xml":
+ return true
+ }
+ case "svg":
+ switch element.Data {
+ case "foreignObject", "desc", "title":
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go
new file mode 100644
index 0000000..885c4c5
--- /dev/null
+++ b/vendor/golang.org/x/net/html/doc.go
@@ -0,0 +1,122 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package html implements an HTML5-compliant tokenizer and parser.
+
+Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
+caller's responsibility to ensure that r provides UTF-8 encoded HTML.
+
+ z := html.NewTokenizer(r)
+
+Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
+which parses the next token and returns its type, or an error:
+
+ for {
+ tt := z.Next()
+ if tt == html.ErrorToken {
+ // ...
+ return ...
+ }
+ // Process the current token.
+ }
+
+There are two APIs for retrieving the current token. The high-level API is to
+call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
+allow optionally calling Raw after Next but before Token, Text, TagName, or
+TagAttr. In EBNF notation, the valid call sequence per token is:
+
+ Next {Raw} [ Token | Text | TagName {TagAttr} ]
+
+Token returns an independent data structure that completely describes a token.
+Entities (such as "<") are unescaped, tag names and attribute keys are
+lower-cased, and attributes are collected into a []Attribute. For example:
+
+ for {
+ if z.Next() == html.ErrorToken {
+ // Returning io.EOF indicates success.
+ return z.Err()
+ }
+ emitToken(z.Token())
+ }
+
+The low-level API performs fewer allocations and copies, but the contents of
+the []byte values returned by Text, TagName and TagAttr may change on the next
+call to Next. For example, to extract an HTML page's anchor text:
+
+ depth := 0
+ for {
+ tt := z.Next()
+ switch tt {
+ case html.ErrorToken:
+ return z.Err()
+ case html.TextToken:
+ if depth > 0 {
+ // emitBytes should copy the []byte it receives,
+ // if it doesn't process it immediately.
+ emitBytes(z.Text())
+ }
+ case html.StartTagToken, html.EndTagToken:
+ tn, _ := z.TagName()
+ if len(tn) == 1 && tn[0] == 'a' {
+ if tt == html.StartTagToken {
+ depth++
+ } else {
+ depth--
+ }
+ }
+ }
+ }
+
+Parsing is done by calling Parse with an io.Reader, which returns the root of
+the parse tree (the document element) as a *Node. It is the caller's
+responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
+example, to process each anchor node in depth-first order:
+
+ doc, err := html.Parse(r)
+ if err != nil {
+ // ...
+ }
+ for n := range doc.Descendants() {
+ if n.Type == html.ElementNode && n.Data == "a" {
+ // Do something with n...
+ }
+ }
+
+The relevant specifications include:
+https://html.spec.whatwg.org/multipage/syntax.html and
+https://html.spec.whatwg.org/multipage/syntax.html#tokenization
+
+# Security Considerations
+
+Care should be taken when parsing and interpreting HTML, whether full documents
+or fragments, within the framework of the HTML specification, especially with
+regard to untrusted inputs.
+
+This package provides both a tokenizer and a parser: the tokenizer implements
+the tokenization stage of the WHATWG HTML parsing specification, while the
+parser implements both the tokenization and the tree construction stages.
+The tokenizer only parses and normalizes individual HTML tokens; the parser
+additionally constructs the DOM tree from the tokenized HTML, as described
+in the tree construction stage of the specification, dynamically modifying
+or extending the document's DOM tree along the way.
+
+If your use case requires semantically well-formed HTML documents, as defined by
+the WHATWG specification, the parser should be used rather than the tokenizer.
+
+In security contexts, if trust decisions are being made using the tokenized or
+parsed content, the input must be re-serialized (for instance by using Render or
+Token.String) in order for those trust decisions to hold, as the process of
+tokenization or parsing may alter the content.
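+
+For example, a minimal sketch of that re-serialization step (the variable
+names here are illustrative only, not part of the API):
+
+ z := html.NewTokenizer(untrustedInput)
+ var b strings.Builder
+ for z.Next() != html.ErrorToken {
+ // Token.String re-escapes and re-serializes the token, so any
+ // check performed on the builder's contents holds for this
+ // serialized form.
+ b.WriteString(z.Token().String())
+ }
+ serialized := b.String() // make trust decisions on this, not the raw input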
+*/
+package html // import "golang.org/x/net/html"
+
+// The tokenization algorithm implemented by this package is not a line-by-line
+// transliteration of the relatively verbose state-machine in the WHATWG
+// specification. A more direct approach is used instead, where the program
+// counter implies the state, such as whether it is tokenizing a tag or a text
+// node. Specification compliance is verified by checking expected and actual
+// outputs over a test suite rather than aiming for algorithmic fidelity.
+
+// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
+// TODO(nigeltao): How does parsing interact with a JavaScript engine?
diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go
new file mode 100644
index 0000000..bca3ae9
--- /dev/null
+++ b/vendor/golang.org/x/net/html/doctype.go
@@ -0,0 +1,156 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "strings"
+)
+
+// parseDoctype parses the data from a DoctypeToken into a name,
+// public identifier, and system identifier. It returns a Node whose Type
+// is DoctypeNode, whose Data is the name, and which has attributes
+// named "system" and "public" for the two identifiers if they were present.
+// quirks is whether the document should be parsed in "quirks mode".
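+//
+// For example (tracing the code below):
+//
+// parseDoctype(`html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"`)
+//
+// returns a node with Data "html", a "public" and a "system" attribute
+// holding the two identifiers, and quirks == false.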
+func parseDoctype(s string) (n *Node, quirks bool) {
+ n = &Node{Type: DoctypeNode}
+
+ // Find the name.
+ space := strings.IndexAny(s, whitespace)
+ if space == -1 {
+ space = len(s)
+ }
+ n.Data = s[:space]
+ // The comparison to "html" is case-sensitive.
+ if n.Data != "html" {
+ quirks = true
+ }
+ n.Data = strings.ToLower(n.Data)
+ s = strings.TrimLeft(s[space:], whitespace)
+
+ if len(s) < 6 {
+ // It can't start with "PUBLIC" or "SYSTEM".
+ // Ignore the rest of the string.
+ return n, quirks || s != ""
+ }
+
+ key := strings.ToLower(s[:6])
+ s = s[6:]
+ for key == "public" || key == "system" {
+ s = strings.TrimLeft(s, whitespace)
+ if s == "" {
+ break
+ }
+ quote := s[0]
+ if quote != '"' && quote != '\'' {
+ break
+ }
+ s = s[1:]
+ q := strings.IndexRune(s, rune(quote))
+ var id string
+ if q == -1 {
+ id = s
+ s = ""
+ } else {
+ id = s[:q]
+ s = s[q+1:]
+ }
+ n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
+ if key == "public" {
+ key = "system"
+ } else {
+ key = ""
+ }
+ }
+
+ if key != "" || s != "" {
+ quirks = true
+ } else if len(n.Attr) > 0 {
+ if n.Attr[0].Key == "public" {
+ public := strings.ToLower(n.Attr[0].Val)
+ switch public {
+ case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
+ quirks = true
+ default:
+ for _, q := range quirkyIDs {
+ if strings.HasPrefix(public, q) {
+ quirks = true
+ break
+ }
+ }
+ }
+ // The following two public IDs only cause quirks mode if there is no system ID.
+ if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
+ strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
+ quirks = true
+ }
+ }
+ if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
+ strings.EqualFold(lastAttr.Val, "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd") {
+ quirks = true
+ }
+ }
+
+ return n, quirks
+}
+
+// quirkyIDs is a list of public doctype identifiers that cause a document
+// to be interpreted in quirks mode. The identifiers should be in lower case.
+var quirkyIDs = []string{
+ "+//silmaril//dtd html pro v0r11 19970101//",
+ "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
+ "-//as//dtd html 3.0 aswedit + extensions//",
+ "-//ietf//dtd html 2.0 level 1//",
+ "-//ietf//dtd html 2.0 level 2//",
+ "-//ietf//dtd html 2.0 strict level 1//",
+ "-//ietf//dtd html 2.0 strict level 2//",
+ "-//ietf//dtd html 2.0 strict//",
+ "-//ietf//dtd html 2.0//",
+ "-//ietf//dtd html 2.1e//",
+ "-//ietf//dtd html 3.0//",
+ "-//ietf//dtd html 3.2 final//",
+ "-//ietf//dtd html 3.2//",
+ "-//ietf//dtd html 3//",
+ "-//ietf//dtd html level 0//",
+ "-//ietf//dtd html level 1//",
+ "-//ietf//dtd html level 2//",
+ "-//ietf//dtd html level 3//",
+ "-//ietf//dtd html strict level 0//",
+ "-//ietf//dtd html strict level 1//",
+ "-//ietf//dtd html strict level 2//",
+ "-//ietf//dtd html strict level 3//",
+ "-//ietf//dtd html strict//",
+ "-//ietf//dtd html//",
+ "-//metrius//dtd metrius presentational//",
+ "-//microsoft//dtd internet explorer 2.0 html strict//",
+ "-//microsoft//dtd internet explorer 2.0 html//",
+ "-//microsoft//dtd internet explorer 2.0 tables//",
+ "-//microsoft//dtd internet explorer 3.0 html strict//",
+ "-//microsoft//dtd internet explorer 3.0 html//",
+ "-//microsoft//dtd internet explorer 3.0 tables//",
+ "-//netscape comm. corp.//dtd html//",
+ "-//netscape comm. corp.//dtd strict html//",
+ "-//o'reilly and associates//dtd html 2.0//",
+ "-//o'reilly and associates//dtd html extended 1.0//",
+ "-//o'reilly and associates//dtd html extended relaxed 1.0//",
+ "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
+ "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
+ "-//spyglass//dtd html 2.0 extended//",
+ "-//sq//dtd html 2.0 hotmetal + extensions//",
+ "-//sun microsystems corp.//dtd hotjava html//",
+ "-//sun microsystems corp.//dtd hotjava strict html//",
+ "-//w3c//dtd html 3 1995-03-24//",
+ "-//w3c//dtd html 3.2 draft//",
+ "-//w3c//dtd html 3.2 final//",
+ "-//w3c//dtd html 3.2//",
+ "-//w3c//dtd html 3.2s draft//",
+ "-//w3c//dtd html 4.0 frameset//",
+ "-//w3c//dtd html 4.0 transitional//",
+ "-//w3c//dtd html experimental 19960712//",
+ "-//w3c//dtd html experimental 970421//",
+ "-//w3c//dtd w3 html//",
+ "-//w3o//dtd w3 html 3.0//",
+ "-//webtechs//dtd mozilla html 2.0//",
+ "-//webtechs//dtd mozilla html//",
+}
diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go
new file mode 100644
index 0000000..b628880
--- /dev/null
+++ b/vendor/golang.org/x/net/html/entity.go
@@ -0,0 +1,2253 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+// All entities that do not end with ';' are 6 or fewer bytes long.
+const longestEntityWithoutSemicolon = 6
+
+// entity is a map from HTML entity names to their values. The semicolon matters:
+// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references
+// lists both "amp" and "amp;" as two separate entries.
+//
+// Note that the HTML5 list is larger than the HTML4 list at
+// http://www.w3.org/TR/html4/sgml/entities.html
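+//
+// For example, both "amp" and "amp;" map to '\U00000026' ('&'), whereas most
+// names, such as "alpha;", are defined only with the trailing semicolon.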
+var entity = map[string]rune{
+ "AElig;": '\U000000C6',
+ "AMP;": '\U00000026',
+ "Aacute;": '\U000000C1',
+ "Abreve;": '\U00000102',
+ "Acirc;": '\U000000C2',
+ "Acy;": '\U00000410',
+ "Afr;": '\U0001D504',
+ "Agrave;": '\U000000C0',
+ "Alpha;": '\U00000391',
+ "Amacr;": '\U00000100',
+ "And;": '\U00002A53',
+ "Aogon;": '\U00000104',
+ "Aopf;": '\U0001D538',
+ "ApplyFunction;": '\U00002061',
+ "Aring;": '\U000000C5',
+ "Ascr;": '\U0001D49C',
+ "Assign;": '\U00002254',
+ "Atilde;": '\U000000C3',
+ "Auml;": '\U000000C4',
+ "Backslash;": '\U00002216',
+ "Barv;": '\U00002AE7',
+ "Barwed;": '\U00002306',
+ "Bcy;": '\U00000411',
+ "Because;": '\U00002235',
+ "Bernoullis;": '\U0000212C',
+ "Beta;": '\U00000392',
+ "Bfr;": '\U0001D505',
+ "Bopf;": '\U0001D539',
+ "Breve;": '\U000002D8',
+ "Bscr;": '\U0000212C',
+ "Bumpeq;": '\U0000224E',
+ "CHcy;": '\U00000427',
+ "COPY;": '\U000000A9',
+ "Cacute;": '\U00000106',
+ "Cap;": '\U000022D2',
+ "CapitalDifferentialD;": '\U00002145',
+ "Cayleys;": '\U0000212D',
+ "Ccaron;": '\U0000010C',
+ "Ccedil;": '\U000000C7',
+ "Ccirc;": '\U00000108',
+ "Cconint;": '\U00002230',
+ "Cdot;": '\U0000010A',
+ "Cedilla;": '\U000000B8',
+ "CenterDot;": '\U000000B7',
+ "Cfr;": '\U0000212D',
+ "Chi;": '\U000003A7',
+ "CircleDot;": '\U00002299',
+ "CircleMinus;": '\U00002296',
+ "CirclePlus;": '\U00002295',
+ "CircleTimes;": '\U00002297',
+ "ClockwiseContourIntegral;": '\U00002232',
+ "CloseCurlyDoubleQuote;": '\U0000201D',
+ "CloseCurlyQuote;": '\U00002019',
+ "Colon;": '\U00002237',
+ "Colone;": '\U00002A74',
+ "Congruent;": '\U00002261',
+ "Conint;": '\U0000222F',
+ "ContourIntegral;": '\U0000222E',
+ "Copf;": '\U00002102',
+ "Coproduct;": '\U00002210',
+ "CounterClockwiseContourIntegral;": '\U00002233',
+ "Cross;": '\U00002A2F',
+ "Cscr;": '\U0001D49E',
+ "Cup;": '\U000022D3',
+ "CupCap;": '\U0000224D',
+ "DD;": '\U00002145',
+ "DDotrahd;": '\U00002911',
+ "DJcy;": '\U00000402',
+ "DScy;": '\U00000405',
+ "DZcy;": '\U0000040F',
+ "Dagger;": '\U00002021',
+ "Darr;": '\U000021A1',
+ "Dashv;": '\U00002AE4',
+ "Dcaron;": '\U0000010E',
+ "Dcy;": '\U00000414',
+ "Del;": '\U00002207',
+ "Delta;": '\U00000394',
+ "Dfr;": '\U0001D507',
+ "DiacriticalAcute;": '\U000000B4',
+ "DiacriticalDot;": '\U000002D9',
+ "DiacriticalDoubleAcute;": '\U000002DD',
+ "DiacriticalGrave;": '\U00000060',
+ "DiacriticalTilde;": '\U000002DC',
+ "Diamond;": '\U000022C4',
+ "DifferentialD;": '\U00002146',
+ "Dopf;": '\U0001D53B',
+ "Dot;": '\U000000A8',
+ "DotDot;": '\U000020DC',
+ "DotEqual;": '\U00002250',
+ "DoubleContourIntegral;": '\U0000222F',
+ "DoubleDot;": '\U000000A8',
+ "DoubleDownArrow;": '\U000021D3',
+ "DoubleLeftArrow;": '\U000021D0',
+ "DoubleLeftRightArrow;": '\U000021D4',
+ "DoubleLeftTee;": '\U00002AE4',
+ "DoubleLongLeftArrow;": '\U000027F8',
+ "DoubleLongLeftRightArrow;": '\U000027FA',
+ "DoubleLongRightArrow;": '\U000027F9',
+ "DoubleRightArrow;": '\U000021D2',
+ "DoubleRightTee;": '\U000022A8',
+ "DoubleUpArrow;": '\U000021D1',
+ "DoubleUpDownArrow;": '\U000021D5',
+ "DoubleVerticalBar;": '\U00002225',
+ "DownArrow;": '\U00002193',
+ "DownArrowBar;": '\U00002913',
+ "DownArrowUpArrow;": '\U000021F5',
+ "DownBreve;": '\U00000311',
+ "DownLeftRightVector;": '\U00002950',
+ "DownLeftTeeVector;": '\U0000295E',
+ "DownLeftVector;": '\U000021BD',
+ "DownLeftVectorBar;": '\U00002956',
+ "DownRightTeeVector;": '\U0000295F',
+ "DownRightVector;": '\U000021C1',
+ "DownRightVectorBar;": '\U00002957',
+ "DownTee;": '\U000022A4',
+ "DownTeeArrow;": '\U000021A7',
+ "Downarrow;": '\U000021D3',
+ "Dscr;": '\U0001D49F',
+ "Dstrok;": '\U00000110',
+ "ENG;": '\U0000014A',
+ "ETH;": '\U000000D0',
+ "Eacute;": '\U000000C9',
+ "Ecaron;": '\U0000011A',
+ "Ecirc;": '\U000000CA',
+ "Ecy;": '\U0000042D',
+ "Edot;": '\U00000116',
+ "Efr;": '\U0001D508',
+ "Egrave;": '\U000000C8',
+ "Element;": '\U00002208',
+ "Emacr;": '\U00000112',
+ "EmptySmallSquare;": '\U000025FB',
+ "EmptyVerySmallSquare;": '\U000025AB',
+ "Eogon;": '\U00000118',
+ "Eopf;": '\U0001D53C',
+ "Epsilon;": '\U00000395',
+ "Equal;": '\U00002A75',
+ "EqualTilde;": '\U00002242',
+ "Equilibrium;": '\U000021CC',
+ "Escr;": '\U00002130',
+ "Esim;": '\U00002A73',
+ "Eta;": '\U00000397',
+ "Euml;": '\U000000CB',
+ "Exists;": '\U00002203',
+ "ExponentialE;": '\U00002147',
+ "Fcy;": '\U00000424',
+ "Ffr;": '\U0001D509',
+ "FilledSmallSquare;": '\U000025FC',
+ "FilledVerySmallSquare;": '\U000025AA',
+ "Fopf;": '\U0001D53D',
+ "ForAll;": '\U00002200',
+ "Fouriertrf;": '\U00002131',
+ "Fscr;": '\U00002131',
+ "GJcy;": '\U00000403',
+ "GT;": '\U0000003E',
+ "Gamma;": '\U00000393',
+ "Gammad;": '\U000003DC',
+ "Gbreve;": '\U0000011E',
+ "Gcedil;": '\U00000122',
+ "Gcirc;": '\U0000011C',
+ "Gcy;": '\U00000413',
+ "Gdot;": '\U00000120',
+ "Gfr;": '\U0001D50A',
+ "Gg;": '\U000022D9',
+ "Gopf;": '\U0001D53E',
+ "GreaterEqual;": '\U00002265',
+ "GreaterEqualLess;": '\U000022DB',
+ "GreaterFullEqual;": '\U00002267',
+ "GreaterGreater;": '\U00002AA2',
+ "GreaterLess;": '\U00002277',
+ "GreaterSlantEqual;": '\U00002A7E',
+ "GreaterTilde;": '\U00002273',
+ "Gscr;": '\U0001D4A2',
+ "Gt;": '\U0000226B',
+ "HARDcy;": '\U0000042A',
+ "Hacek;": '\U000002C7',
+ "Hat;": '\U0000005E',
+ "Hcirc;": '\U00000124',
+ "Hfr;": '\U0000210C',
+ "HilbertSpace;": '\U0000210B',
+ "Hopf;": '\U0000210D',
+ "HorizontalLine;": '\U00002500',
+ "Hscr;": '\U0000210B',
+ "Hstrok;": '\U00000126',
+ "HumpDownHump;": '\U0000224E',
+ "HumpEqual;": '\U0000224F',
+ "IEcy;": '\U00000415',
+ "IJlig;": '\U00000132',
+ "IOcy;": '\U00000401',
+ "Iacute;": '\U000000CD',
+ "Icirc;": '\U000000CE',
+ "Icy;": '\U00000418',
+ "Idot;": '\U00000130',
+ "Ifr;": '\U00002111',
+ "Igrave;": '\U000000CC',
+ "Im;": '\U00002111',
+ "Imacr;": '\U0000012A',
+ "ImaginaryI;": '\U00002148',
+ "Implies;": '\U000021D2',
+ "Int;": '\U0000222C',
+ "Integral;": '\U0000222B',
+ "Intersection;": '\U000022C2',
+ "InvisibleComma;": '\U00002063',
+ "InvisibleTimes;": '\U00002062',
+ "Iogon;": '\U0000012E',
+ "Iopf;": '\U0001D540',
+ "Iota;": '\U00000399',
+ "Iscr;": '\U00002110',
+ "Itilde;": '\U00000128',
+ "Iukcy;": '\U00000406',
+ "Iuml;": '\U000000CF',
+ "Jcirc;": '\U00000134',
+ "Jcy;": '\U00000419',
+ "Jfr;": '\U0001D50D',
+ "Jopf;": '\U0001D541',
+ "Jscr;": '\U0001D4A5',
+ "Jsercy;": '\U00000408',
+ "Jukcy;": '\U00000404',
+ "KHcy;": '\U00000425',
+ "KJcy;": '\U0000040C',
+ "Kappa;": '\U0000039A',
+ "Kcedil;": '\U00000136',
+ "Kcy;": '\U0000041A',
+ "Kfr;": '\U0001D50E',
+ "Kopf;": '\U0001D542',
+ "Kscr;": '\U0001D4A6',
+ "LJcy;": '\U00000409',
+ "LT;": '\U0000003C',
+ "Lacute;": '\U00000139',
+ "Lambda;": '\U0000039B',
+ "Lang;": '\U000027EA',
+ "Laplacetrf;": '\U00002112',
+ "Larr;": '\U0000219E',
+ "Lcaron;": '\U0000013D',
+ "Lcedil;": '\U0000013B',
+ "Lcy;": '\U0000041B',
+ "LeftAngleBracket;": '\U000027E8',
+ "LeftArrow;": '\U00002190',
+ "LeftArrowBar;": '\U000021E4',
+ "LeftArrowRightArrow;": '\U000021C6',
+ "LeftCeiling;": '\U00002308',
+ "LeftDoubleBracket;": '\U000027E6',
+ "LeftDownTeeVector;": '\U00002961',
+ "LeftDownVector;": '\U000021C3',
+ "LeftDownVectorBar;": '\U00002959',
+ "LeftFloor;": '\U0000230A',
+ "LeftRightArrow;": '\U00002194',
+ "LeftRightVector;": '\U0000294E',
+ "LeftTee;": '\U000022A3',
+ "LeftTeeArrow;": '\U000021A4',
+ "LeftTeeVector;": '\U0000295A',
+ "LeftTriangle;": '\U000022B2',
+ "LeftTriangleBar;": '\U000029CF',
+ "LeftTriangleEqual;": '\U000022B4',
+ "LeftUpDownVector;": '\U00002951',
+ "LeftUpTeeVector;": '\U00002960',
+ "LeftUpVector;": '\U000021BF',
+ "LeftUpVectorBar;": '\U00002958',
+ "LeftVector;": '\U000021BC',
+ "LeftVectorBar;": '\U00002952',
+ "Leftarrow;": '\U000021D0',
+ "Leftrightarrow;": '\U000021D4',
+ "LessEqualGreater;": '\U000022DA',
+ "LessFullEqual;": '\U00002266',
+ "LessGreater;": '\U00002276',
+ "LessLess;": '\U00002AA1',
+ "LessSlantEqual;": '\U00002A7D',
+ "LessTilde;": '\U00002272',
+ "Lfr;": '\U0001D50F',
+ "Ll;": '\U000022D8',
+ "Lleftarrow;": '\U000021DA',
+ "Lmidot;": '\U0000013F',
+ "LongLeftArrow;": '\U000027F5',
+ "LongLeftRightArrow;": '\U000027F7',
+ "LongRightArrow;": '\U000027F6',
+ "Longleftarrow;": '\U000027F8',
+ "Longleftrightarrow;": '\U000027FA',
+ "Longrightarrow;": '\U000027F9',
+ "Lopf;": '\U0001D543',
+ "LowerLeftArrow;": '\U00002199',
+ "LowerRightArrow;": '\U00002198',
+ "Lscr;": '\U00002112',
+ "Lsh;": '\U000021B0',
+ "Lstrok;": '\U00000141',
+ "Lt;": '\U0000226A',
+ "Map;": '\U00002905',
+ "Mcy;": '\U0000041C',
+ "MediumSpace;": '\U0000205F',
+ "Mellintrf;": '\U00002133',
+ "Mfr;": '\U0001D510',
+ "MinusPlus;": '\U00002213',
+ "Mopf;": '\U0001D544',
+ "Mscr;": '\U00002133',
+ "Mu;": '\U0000039C',
+ "NJcy;": '\U0000040A',
+ "Nacute;": '\U00000143',
+ "Ncaron;": '\U00000147',
+ "Ncedil;": '\U00000145',
+ "Ncy;": '\U0000041D',
+ "NegativeMediumSpace;": '\U0000200B',
+ "NegativeThickSpace;": '\U0000200B',
+ "NegativeThinSpace;": '\U0000200B',
+ "NegativeVeryThinSpace;": '\U0000200B',
+ "NestedGreaterGreater;": '\U0000226B',
+ "NestedLessLess;": '\U0000226A',
+ "NewLine;": '\U0000000A',
+ "Nfr;": '\U0001D511',
+ "NoBreak;": '\U00002060',
+ "NonBreakingSpace;": '\U000000A0',
+ "Nopf;": '\U00002115',
+ "Not;": '\U00002AEC',
+ "NotCongruent;": '\U00002262',
+ "NotCupCap;": '\U0000226D',
+ "NotDoubleVerticalBar;": '\U00002226',
+ "NotElement;": '\U00002209',
+ "NotEqual;": '\U00002260',
+ "NotExists;": '\U00002204',
+ "NotGreater;": '\U0000226F',
+ "NotGreaterEqual;": '\U00002271',
+ "NotGreaterLess;": '\U00002279',
+ "NotGreaterTilde;": '\U00002275',
+ "NotLeftTriangle;": '\U000022EA',
+ "NotLeftTriangleEqual;": '\U000022EC',
+ "NotLess;": '\U0000226E',
+ "NotLessEqual;": '\U00002270',
+ "NotLessGreater;": '\U00002278',
+ "NotLessTilde;": '\U00002274',
+ "NotPrecedes;": '\U00002280',
+ "NotPrecedesSlantEqual;": '\U000022E0',
+ "NotReverseElement;": '\U0000220C',
+ "NotRightTriangle;": '\U000022EB',
+ "NotRightTriangleEqual;": '\U000022ED',
+ "NotSquareSubsetEqual;": '\U000022E2',
+ "NotSquareSupersetEqual;": '\U000022E3',
+ "NotSubsetEqual;": '\U00002288',
+ "NotSucceeds;": '\U00002281',
+ "NotSucceedsSlantEqual;": '\U000022E1',
+ "NotSupersetEqual;": '\U00002289',
+ "NotTilde;": '\U00002241',
+ "NotTildeEqual;": '\U00002244',
+ "NotTildeFullEqual;": '\U00002247',
+ "NotTildeTilde;": '\U00002249',
+ "NotVerticalBar;": '\U00002224',
+ "Nscr;": '\U0001D4A9',
+ "Ntilde;": '\U000000D1',
+ "Nu;": '\U0000039D',
+ "OElig;": '\U00000152',
+ "Oacute;": '\U000000D3',
+ "Ocirc;": '\U000000D4',
+ "Ocy;": '\U0000041E',
+ "Odblac;": '\U00000150',
+ "Ofr;": '\U0001D512',
+ "Ograve;": '\U000000D2',
+ "Omacr;": '\U0000014C',
+ "Omega;": '\U000003A9',
+ "Omicron;": '\U0000039F',
+ "Oopf;": '\U0001D546',
+ "OpenCurlyDoubleQuote;": '\U0000201C',
+ "OpenCurlyQuote;": '\U00002018',
+ "Or;": '\U00002A54',
+ "Oscr;": '\U0001D4AA',
+ "Oslash;": '\U000000D8',
+ "Otilde;": '\U000000D5',
+ "Otimes;": '\U00002A37',
+ "Ouml;": '\U000000D6',
+ "OverBar;": '\U0000203E',
+ "OverBrace;": '\U000023DE',
+ "OverBracket;": '\U000023B4',
+ "OverParenthesis;": '\U000023DC',
+ "PartialD;": '\U00002202',
+ "Pcy;": '\U0000041F',
+ "Pfr;": '\U0001D513',
+ "Phi;": '\U000003A6',
+ "Pi;": '\U000003A0',
+ "PlusMinus;": '\U000000B1',
+ "Poincareplane;": '\U0000210C',
+ "Popf;": '\U00002119',
+ "Pr;": '\U00002ABB',
+ "Precedes;": '\U0000227A',
+ "PrecedesEqual;": '\U00002AAF',
+ "PrecedesSlantEqual;": '\U0000227C',
+ "PrecedesTilde;": '\U0000227E',
+ "Prime;": '\U00002033',
+ "Product;": '\U0000220F',
+ "Proportion;": '\U00002237',
+ "Proportional;": '\U0000221D',
+ "Pscr;": '\U0001D4AB',
+ "Psi;": '\U000003A8',
+ "QUOT;": '\U00000022',
+ "Qfr;": '\U0001D514',
+ "Qopf;": '\U0000211A',
+ "Qscr;": '\U0001D4AC',
+ "RBarr;": '\U00002910',
+ "REG;": '\U000000AE',
+ "Racute;": '\U00000154',
+ "Rang;": '\U000027EB',
+ "Rarr;": '\U000021A0',
+ "Rarrtl;": '\U00002916',
+ "Rcaron;": '\U00000158',
+ "Rcedil;": '\U00000156',
+ "Rcy;": '\U00000420',
+ "Re;": '\U0000211C',
+ "ReverseElement;": '\U0000220B',
+ "ReverseEquilibrium;": '\U000021CB',
+ "ReverseUpEquilibrium;": '\U0000296F',
+ "Rfr;": '\U0000211C',
+ "Rho;": '\U000003A1',
+ "RightAngleBracket;": '\U000027E9',
+ "RightArrow;": '\U00002192',
+ "RightArrowBar;": '\U000021E5',
+ "RightArrowLeftArrow;": '\U000021C4',
+ "RightCeiling;": '\U00002309',
+ "RightDoubleBracket;": '\U000027E7',
+ "RightDownTeeVector;": '\U0000295D',
+ "RightDownVector;": '\U000021C2',
+ "RightDownVectorBar;": '\U00002955',
+ "RightFloor;": '\U0000230B',
+ "RightTee;": '\U000022A2',
+ "RightTeeArrow;": '\U000021A6',
+ "RightTeeVector;": '\U0000295B',
+ "RightTriangle;": '\U000022B3',
+ "RightTriangleBar;": '\U000029D0',
+ "RightTriangleEqual;": '\U000022B5',
+ "RightUpDownVector;": '\U0000294F',
+ "RightUpTeeVector;": '\U0000295C',
+ "RightUpVector;": '\U000021BE',
+ "RightUpVectorBar;": '\U00002954',
+ "RightVector;": '\U000021C0',
+ "RightVectorBar;": '\U00002953',
+ "Rightarrow;": '\U000021D2',
+ "Ropf;": '\U0000211D',
+ "RoundImplies;": '\U00002970',
+ "Rrightarrow;": '\U000021DB',
+ "Rscr;": '\U0000211B',
+ "Rsh;": '\U000021B1',
+ "RuleDelayed;": '\U000029F4',
+ "SHCHcy;": '\U00000429',
+ "SHcy;": '\U00000428',
+ "SOFTcy;": '\U0000042C',
+ "Sacute;": '\U0000015A',
+ "Sc;": '\U00002ABC',
+ "Scaron;": '\U00000160',
+ "Scedil;": '\U0000015E',
+ "Scirc;": '\U0000015C',
+ "Scy;": '\U00000421',
+ "Sfr;": '\U0001D516',
+ "ShortDownArrow;": '\U00002193',
+ "ShortLeftArrow;": '\U00002190',
+ "ShortRightArrow;": '\U00002192',
+ "ShortUpArrow;": '\U00002191',
+ "Sigma;": '\U000003A3',
+ "SmallCircle;": '\U00002218',
+ "Sopf;": '\U0001D54A',
+ "Sqrt;": '\U0000221A',
+ "Square;": '\U000025A1',
+ "SquareIntersection;": '\U00002293',
+ "SquareSubset;": '\U0000228F',
+ "SquareSubsetEqual;": '\U00002291',
+ "SquareSuperset;": '\U00002290',
+ "SquareSupersetEqual;": '\U00002292',
+ "SquareUnion;": '\U00002294',
+ "Sscr;": '\U0001D4AE',
+ "Star;": '\U000022C6',
+ "Sub;": '\U000022D0',
+ "Subset;": '\U000022D0',
+ "SubsetEqual;": '\U00002286',
+ "Succeeds;": '\U0000227B',
+ "SucceedsEqual;": '\U00002AB0',
+ "SucceedsSlantEqual;": '\U0000227D',
+ "SucceedsTilde;": '\U0000227F',
+ "SuchThat;": '\U0000220B',
+ "Sum;": '\U00002211',
+ "Sup;": '\U000022D1',
+ "Superset;": '\U00002283',
+ "SupersetEqual;": '\U00002287',
+ "Supset;": '\U000022D1',
+ "THORN;": '\U000000DE',
+ "TRADE;": '\U00002122',
+ "TSHcy;": '\U0000040B',
+ "TScy;": '\U00000426',
+ "Tab;": '\U00000009',
+ "Tau;": '\U000003A4',
+ "Tcaron;": '\U00000164',
+ "Tcedil;": '\U00000162',
+ "Tcy;": '\U00000422',
+ "Tfr;": '\U0001D517',
+ "Therefore;": '\U00002234',
+ "Theta;": '\U00000398',
+ "ThinSpace;": '\U00002009',
+ "Tilde;": '\U0000223C',
+ "TildeEqual;": '\U00002243',
+ "TildeFullEqual;": '\U00002245',
+ "TildeTilde;": '\U00002248',
+ "Topf;": '\U0001D54B',
+ "TripleDot;": '\U000020DB',
+ "Tscr;": '\U0001D4AF',
+ "Tstrok;": '\U00000166',
+ "Uacute;": '\U000000DA',
+ "Uarr;": '\U0000219F',
+ "Uarrocir;": '\U00002949',
+ "Ubrcy;": '\U0000040E',
+ "Ubreve;": '\U0000016C',
+ "Ucirc;": '\U000000DB',
+ "Ucy;": '\U00000423',
+ "Udblac;": '\U00000170',
+ "Ufr;": '\U0001D518',
+ "Ugrave;": '\U000000D9',
+ "Umacr;": '\U0000016A',
+ "UnderBar;": '\U0000005F',
+ "UnderBrace;": '\U000023DF',
+ "UnderBracket;": '\U000023B5',
+ "UnderParenthesis;": '\U000023DD',
+ "Union;": '\U000022C3',
+ "UnionPlus;": '\U0000228E',
+ "Uogon;": '\U00000172',
+ "Uopf;": '\U0001D54C',
+ "UpArrow;": '\U00002191',
+ "UpArrowBar;": '\U00002912',
+ "UpArrowDownArrow;": '\U000021C5',
+ "UpDownArrow;": '\U00002195',
+ "UpEquilibrium;": '\U0000296E',
+ "UpTee;": '\U000022A5',
+ "UpTeeArrow;": '\U000021A5',
+ "Uparrow;": '\U000021D1',
+ "Updownarrow;": '\U000021D5',
+ "UpperLeftArrow;": '\U00002196',
+ "UpperRightArrow;": '\U00002197',
+ "Upsi;": '\U000003D2',
+ "Upsilon;": '\U000003A5',
+ "Uring;": '\U0000016E',
+ "Uscr;": '\U0001D4B0',
+ "Utilde;": '\U00000168',
+ "Uuml;": '\U000000DC',
+ "VDash;": '\U000022AB',
+ "Vbar;": '\U00002AEB',
+ "Vcy;": '\U00000412',
+ "Vdash;": '\U000022A9',
+ "Vdashl;": '\U00002AE6',
+ "Vee;": '\U000022C1',
+ "Verbar;": '\U00002016',
+ "Vert;": '\U00002016',
+ "VerticalBar;": '\U00002223',
+ "VerticalLine;": '\U0000007C',
+ "VerticalSeparator;": '\U00002758',
+ "VerticalTilde;": '\U00002240',
+ "VeryThinSpace;": '\U0000200A',
+ "Vfr;": '\U0001D519',
+ "Vopf;": '\U0001D54D',
+ "Vscr;": '\U0001D4B1',
+ "Vvdash;": '\U000022AA',
+ "Wcirc;": '\U00000174',
+ "Wedge;": '\U000022C0',
+ "Wfr;": '\U0001D51A',
+ "Wopf;": '\U0001D54E',
+ "Wscr;": '\U0001D4B2',
+ "Xfr;": '\U0001D51B',
+ "Xi;": '\U0000039E',
+ "Xopf;": '\U0001D54F',
+ "Xscr;": '\U0001D4B3',
+ "YAcy;": '\U0000042F',
+ "YIcy;": '\U00000407',
+ "YUcy;": '\U0000042E',
+ "Yacute;": '\U000000DD',
+ "Ycirc;": '\U00000176',
+ "Ycy;": '\U0000042B',
+ "Yfr;": '\U0001D51C',
+ "Yopf;": '\U0001D550',
+ "Yscr;": '\U0001D4B4',
+ "Yuml;": '\U00000178',
+ "ZHcy;": '\U00000416',
+ "Zacute;": '\U00000179',
+ "Zcaron;": '\U0000017D',
+ "Zcy;": '\U00000417',
+ "Zdot;": '\U0000017B',
+ "ZeroWidthSpace;": '\U0000200B',
+ "Zeta;": '\U00000396',
+ "Zfr;": '\U00002128',
+ "Zopf;": '\U00002124',
+ "Zscr;": '\U0001D4B5',
+ "aacute;": '\U000000E1',
+ "abreve;": '\U00000103',
+ "ac;": '\U0000223E',
+ "acd;": '\U0000223F',
+ "acirc;": '\U000000E2',
+ "acute;": '\U000000B4',
+ "acy;": '\U00000430',
+ "aelig;": '\U000000E6',
+ "af;": '\U00002061',
+ "afr;": '\U0001D51E',
+ "agrave;": '\U000000E0',
+ "alefsym;": '\U00002135',
+ "aleph;": '\U00002135',
+ "alpha;": '\U000003B1',
+ "amacr;": '\U00000101',
+ "amalg;": '\U00002A3F',
+ "amp;": '\U00000026',
+ "and;": '\U00002227',
+ "andand;": '\U00002A55',
+ "andd;": '\U00002A5C',
+ "andslope;": '\U00002A58',
+ "andv;": '\U00002A5A',
+ "ang;": '\U00002220',
+ "ange;": '\U000029A4',
+ "angle;": '\U00002220',
+ "angmsd;": '\U00002221',
+ "angmsdaa;": '\U000029A8',
+ "angmsdab;": '\U000029A9',
+ "angmsdac;": '\U000029AA',
+ "angmsdad;": '\U000029AB',
+ "angmsdae;": '\U000029AC',
+ "angmsdaf;": '\U000029AD',
+ "angmsdag;": '\U000029AE',
+ "angmsdah;": '\U000029AF',
+ "angrt;": '\U0000221F',
+ "angrtvb;": '\U000022BE',
+ "angrtvbd;": '\U0000299D',
+ "angsph;": '\U00002222',
+ "angst;": '\U000000C5',
+ "angzarr;": '\U0000237C',
+ "aogon;": '\U00000105',
+ "aopf;": '\U0001D552',
+ "ap;": '\U00002248',
+ "apE;": '\U00002A70',
+ "apacir;": '\U00002A6F',
+ "ape;": '\U0000224A',
+ "apid;": '\U0000224B',
+ "apos;": '\U00000027',
+ "approx;": '\U00002248',
+ "approxeq;": '\U0000224A',
+ "aring;": '\U000000E5',
+ "ascr;": '\U0001D4B6',
+ "ast;": '\U0000002A',
+ "asymp;": '\U00002248',
+ "asympeq;": '\U0000224D',
+ "atilde;": '\U000000E3',
+ "auml;": '\U000000E4',
+ "awconint;": '\U00002233',
+ "awint;": '\U00002A11',
+ "bNot;": '\U00002AED',
+ "backcong;": '\U0000224C',
+ "backepsilon;": '\U000003F6',
+ "backprime;": '\U00002035',
+ "backsim;": '\U0000223D',
+ "backsimeq;": '\U000022CD',
+ "barvee;": '\U000022BD',
+ "barwed;": '\U00002305',
+ "barwedge;": '\U00002305',
+ "bbrk;": '\U000023B5',
+ "bbrktbrk;": '\U000023B6',
+ "bcong;": '\U0000224C',
+ "bcy;": '\U00000431',
+ "bdquo;": '\U0000201E',
+ "becaus;": '\U00002235',
+ "because;": '\U00002235',
+ "bemptyv;": '\U000029B0',
+ "bepsi;": '\U000003F6',
+ "bernou;": '\U0000212C',
+ "beta;": '\U000003B2',
+ "beth;": '\U00002136',
+ "between;": '\U0000226C',
+ "bfr;": '\U0001D51F',
+ "bigcap;": '\U000022C2',
+ "bigcirc;": '\U000025EF',
+ "bigcup;": '\U000022C3',
+ "bigodot;": '\U00002A00',
+ "bigoplus;": '\U00002A01',
+ "bigotimes;": '\U00002A02',
+ "bigsqcup;": '\U00002A06',
+ "bigstar;": '\U00002605',
+ "bigtriangledown;": '\U000025BD',
+ "bigtriangleup;": '\U000025B3',
+ "biguplus;": '\U00002A04',
+ "bigvee;": '\U000022C1',
+ "bigwedge;": '\U000022C0',
+ "bkarow;": '\U0000290D',
+ "blacklozenge;": '\U000029EB',
+ "blacksquare;": '\U000025AA',
+ "blacktriangle;": '\U000025B4',
+ "blacktriangledown;": '\U000025BE',
+ "blacktriangleleft;": '\U000025C2',
+ "blacktriangleright;": '\U000025B8',
+ "blank;": '\U00002423',
+ "blk12;": '\U00002592',
+ "blk14;": '\U00002591',
+ "blk34;": '\U00002593',
+ "block;": '\U00002588',
+ "bnot;": '\U00002310',
+ "bopf;": '\U0001D553',
+ "bot;": '\U000022A5',
+ "bottom;": '\U000022A5',
+ "bowtie;": '\U000022C8',
+ "boxDL;": '\U00002557',
+ "boxDR;": '\U00002554',
+ "boxDl;": '\U00002556',
+ "boxDr;": '\U00002553',
+ "boxH;": '\U00002550',
+ "boxHD;": '\U00002566',
+ "boxHU;": '\U00002569',
+ "boxHd;": '\U00002564',
+ "boxHu;": '\U00002567',
+ "boxUL;": '\U0000255D',
+ "boxUR;": '\U0000255A',
+ "boxUl;": '\U0000255C',
+ "boxUr;": '\U00002559',
+ "boxV;": '\U00002551',
+ "boxVH;": '\U0000256C',
+ "boxVL;": '\U00002563',
+ "boxVR;": '\U00002560',
+ "boxVh;": '\U0000256B',
+ "boxVl;": '\U00002562',
+ "boxVr;": '\U0000255F',
+ "boxbox;": '\U000029C9',
+ "boxdL;": '\U00002555',
+ "boxdR;": '\U00002552',
+ "boxdl;": '\U00002510',
+ "boxdr;": '\U0000250C',
+ "boxh;": '\U00002500',
+ "boxhD;": '\U00002565',
+ "boxhU;": '\U00002568',
+ "boxhd;": '\U0000252C',
+ "boxhu;": '\U00002534',
+ "boxminus;": '\U0000229F',
+ "boxplus;": '\U0000229E',
+ "boxtimes;": '\U000022A0',
+ "boxuL;": '\U0000255B',
+ "boxuR;": '\U00002558',
+ "boxul;": '\U00002518',
+ "boxur;": '\U00002514',
+ "boxv;": '\U00002502',
+ "boxvH;": '\U0000256A',
+ "boxvL;": '\U00002561',
+ "boxvR;": '\U0000255E',
+ "boxvh;": '\U0000253C',
+ "boxvl;": '\U00002524',
+ "boxvr;": '\U0000251C',
+ "bprime;": '\U00002035',
+ "breve;": '\U000002D8',
+ "brvbar;": '\U000000A6',
+ "bscr;": '\U0001D4B7',
+ "bsemi;": '\U0000204F',
+ "bsim;": '\U0000223D',
+ "bsime;": '\U000022CD',
+ "bsol;": '\U0000005C',
+ "bsolb;": '\U000029C5',
+ "bsolhsub;": '\U000027C8',
+ "bull;": '\U00002022',
+ "bullet;": '\U00002022',
+ "bump;": '\U0000224E',
+ "bumpE;": '\U00002AAE',
+ "bumpe;": '\U0000224F',
+ "bumpeq;": '\U0000224F',
+ "cacute;": '\U00000107',
+ "cap;": '\U00002229',
+ "capand;": '\U00002A44',
+ "capbrcup;": '\U00002A49',
+ "capcap;": '\U00002A4B',
+ "capcup;": '\U00002A47',
+ "capdot;": '\U00002A40',
+ "caret;": '\U00002041',
+ "caron;": '\U000002C7',
+ "ccaps;": '\U00002A4D',
+ "ccaron;": '\U0000010D',
+ "ccedil;": '\U000000E7',
+ "ccirc;": '\U00000109',
+ "ccups;": '\U00002A4C',
+ "ccupssm;": '\U00002A50',
+ "cdot;": '\U0000010B',
+ "cedil;": '\U000000B8',
+ "cemptyv;": '\U000029B2',
+ "cent;": '\U000000A2',
+ "centerdot;": '\U000000B7',
+ "cfr;": '\U0001D520',
+ "chcy;": '\U00000447',
+ "check;": '\U00002713',
+ "checkmark;": '\U00002713',
+ "chi;": '\U000003C7',
+ "cir;": '\U000025CB',
+ "cirE;": '\U000029C3',
+ "circ;": '\U000002C6',
+ "circeq;": '\U00002257',
+ "circlearrowleft;": '\U000021BA',
+ "circlearrowright;": '\U000021BB',
+ "circledR;": '\U000000AE',
+ "circledS;": '\U000024C8',
+ "circledast;": '\U0000229B',
+ "circledcirc;": '\U0000229A',
+ "circleddash;": '\U0000229D',
+ "cire;": '\U00002257',
+ "cirfnint;": '\U00002A10',
+ "cirmid;": '\U00002AEF',
+ "cirscir;": '\U000029C2',
+ "clubs;": '\U00002663',
+ "clubsuit;": '\U00002663',
+ "colon;": '\U0000003A',
+ "colone;": '\U00002254',
+ "coloneq;": '\U00002254',
+ "comma;": '\U0000002C',
+ "commat;": '\U00000040',
+ "comp;": '\U00002201',
+ "compfn;": '\U00002218',
+ "complement;": '\U00002201',
+ "complexes;": '\U00002102',
+ "cong;": '\U00002245',
+ "congdot;": '\U00002A6D',
+ "conint;": '\U0000222E',
+ "copf;": '\U0001D554',
+ "coprod;": '\U00002210',
+ "copy;": '\U000000A9',
+ "copysr;": '\U00002117',
+ "crarr;": '\U000021B5',
+ "cross;": '\U00002717',
+ "cscr;": '\U0001D4B8',
+ "csub;": '\U00002ACF',
+ "csube;": '\U00002AD1',
+ "csup;": '\U00002AD0',
+ "csupe;": '\U00002AD2',
+ "ctdot;": '\U000022EF',
+ "cudarrl;": '\U00002938',
+ "cudarrr;": '\U00002935',
+ "cuepr;": '\U000022DE',
+ "cuesc;": '\U000022DF',
+ "cularr;": '\U000021B6',
+ "cularrp;": '\U0000293D',
+ "cup;": '\U0000222A',
+ "cupbrcap;": '\U00002A48',
+ "cupcap;": '\U00002A46',
+ "cupcup;": '\U00002A4A',
+ "cupdot;": '\U0000228D',
+ "cupor;": '\U00002A45',
+ "curarr;": '\U000021B7',
+ "curarrm;": '\U0000293C',
+ "curlyeqprec;": '\U000022DE',
+ "curlyeqsucc;": '\U000022DF',
+ "curlyvee;": '\U000022CE',
+ "curlywedge;": '\U000022CF',
+ "curren;": '\U000000A4',
+ "curvearrowleft;": '\U000021B6',
+ "curvearrowright;": '\U000021B7',
+ "cuvee;": '\U000022CE',
+ "cuwed;": '\U000022CF',
+ "cwconint;": '\U00002232',
+ "cwint;": '\U00002231',
+ "cylcty;": '\U0000232D',
+ "dArr;": '\U000021D3',
+ "dHar;": '\U00002965',
+ "dagger;": '\U00002020',
+ "daleth;": '\U00002138',
+ "darr;": '\U00002193',
+ "dash;": '\U00002010',
+ "dashv;": '\U000022A3',
+ "dbkarow;": '\U0000290F',
+ "dblac;": '\U000002DD',
+ "dcaron;": '\U0000010F',
+ "dcy;": '\U00000434',
+ "dd;": '\U00002146',
+ "ddagger;": '\U00002021',
+ "ddarr;": '\U000021CA',
+ "ddotseq;": '\U00002A77',
+ "deg;": '\U000000B0',
+ "delta;": '\U000003B4',
+ "demptyv;": '\U000029B1',
+ "dfisht;": '\U0000297F',
+ "dfr;": '\U0001D521',
+ "dharl;": '\U000021C3',
+ "dharr;": '\U000021C2',
+ "diam;": '\U000022C4',
+ "diamond;": '\U000022C4',
+ "diamondsuit;": '\U00002666',
+ "diams;": '\U00002666',
+ "die;": '\U000000A8',
+ "digamma;": '\U000003DD',
+ "disin;": '\U000022F2',
+ "div;": '\U000000F7',
+ "divide;": '\U000000F7',
+ "divideontimes;": '\U000022C7',
+ "divonx;": '\U000022C7',
+ "djcy;": '\U00000452',
+ "dlcorn;": '\U0000231E',
+ "dlcrop;": '\U0000230D',
+ "dollar;": '\U00000024',
+ "dopf;": '\U0001D555',
+ "dot;": '\U000002D9',
+ "doteq;": '\U00002250',
+ "doteqdot;": '\U00002251',
+ "dotminus;": '\U00002238',
+ "dotplus;": '\U00002214',
+ "dotsquare;": '\U000022A1',
+ "doublebarwedge;": '\U00002306',
+ "downarrow;": '\U00002193',
+ "downdownarrows;": '\U000021CA',
+ "downharpoonleft;": '\U000021C3',
+ "downharpoonright;": '\U000021C2',
+ "drbkarow;": '\U00002910',
+ "drcorn;": '\U0000231F',
+ "drcrop;": '\U0000230C',
+ "dscr;": '\U0001D4B9',
+ "dscy;": '\U00000455',
+ "dsol;": '\U000029F6',
+ "dstrok;": '\U00000111',
+ "dtdot;": '\U000022F1',
+ "dtri;": '\U000025BF',
+ "dtrif;": '\U000025BE',
+ "duarr;": '\U000021F5',
+ "duhar;": '\U0000296F',
+ "dwangle;": '\U000029A6',
+ "dzcy;": '\U0000045F',
+ "dzigrarr;": '\U000027FF',
+ "eDDot;": '\U00002A77',
+ "eDot;": '\U00002251',
+ "eacute;": '\U000000E9',
+ "easter;": '\U00002A6E',
+ "ecaron;": '\U0000011B',
+ "ecir;": '\U00002256',
+ "ecirc;": '\U000000EA',
+ "ecolon;": '\U00002255',
+ "ecy;": '\U0000044D',
+ "edot;": '\U00000117',
+ "ee;": '\U00002147',
+ "efDot;": '\U00002252',
+ "efr;": '\U0001D522',
+ "eg;": '\U00002A9A',
+ "egrave;": '\U000000E8',
+ "egs;": '\U00002A96',
+ "egsdot;": '\U00002A98',
+ "el;": '\U00002A99',
+ "elinters;": '\U000023E7',
+ "ell;": '\U00002113',
+ "els;": '\U00002A95',
+ "elsdot;": '\U00002A97',
+ "emacr;": '\U00000113',
+ "empty;": '\U00002205',
+ "emptyset;": '\U00002205',
+ "emptyv;": '\U00002205',
+ "emsp;": '\U00002003',
+ "emsp13;": '\U00002004',
+ "emsp14;": '\U00002005',
+ "eng;": '\U0000014B',
+ "ensp;": '\U00002002',
+ "eogon;": '\U00000119',
+ "eopf;": '\U0001D556',
+ "epar;": '\U000022D5',
+ "eparsl;": '\U000029E3',
+ "eplus;": '\U00002A71',
+ "epsi;": '\U000003B5',
+ "epsilon;": '\U000003B5',
+ "epsiv;": '\U000003F5',
+ "eqcirc;": '\U00002256',
+ "eqcolon;": '\U00002255',
+ "eqsim;": '\U00002242',
+ "eqslantgtr;": '\U00002A96',
+ "eqslantless;": '\U00002A95',
+ "equals;": '\U0000003D',
+ "equest;": '\U0000225F',
+ "equiv;": '\U00002261',
+ "equivDD;": '\U00002A78',
+ "eqvparsl;": '\U000029E5',
+ "erDot;": '\U00002253',
+ "erarr;": '\U00002971',
+ "escr;": '\U0000212F',
+ "esdot;": '\U00002250',
+ "esim;": '\U00002242',
+ "eta;": '\U000003B7',
+ "eth;": '\U000000F0',
+ "euml;": '\U000000EB',
+ "euro;": '\U000020AC',
+ "excl;": '\U00000021',
+ "exist;": '\U00002203',
+ "expectation;": '\U00002130',
+ "exponentiale;": '\U00002147',
+ "fallingdotseq;": '\U00002252',
+ "fcy;": '\U00000444',
+ "female;": '\U00002640',
+ "ffilig;": '\U0000FB03',
+ "fflig;": '\U0000FB00',
+ "ffllig;": '\U0000FB04',
+ "ffr;": '\U0001D523',
+ "filig;": '\U0000FB01',
+ "flat;": '\U0000266D',
+ "fllig;": '\U0000FB02',
+ "fltns;": '\U000025B1',
+ "fnof;": '\U00000192',
+ "fopf;": '\U0001D557',
+ "forall;": '\U00002200',
+ "fork;": '\U000022D4',
+ "forkv;": '\U00002AD9',
+ "fpartint;": '\U00002A0D',
+ "frac12;": '\U000000BD',
+ "frac13;": '\U00002153',
+ "frac14;": '\U000000BC',
+ "frac15;": '\U00002155',
+ "frac16;": '\U00002159',
+ "frac18;": '\U0000215B',
+ "frac23;": '\U00002154',
+ "frac25;": '\U00002156',
+ "frac34;": '\U000000BE',
+ "frac35;": '\U00002157',
+ "frac38;": '\U0000215C',
+ "frac45;": '\U00002158',
+ "frac56;": '\U0000215A',
+ "frac58;": '\U0000215D',
+ "frac78;": '\U0000215E',
+ "frasl;": '\U00002044',
+ "frown;": '\U00002322',
+ "fscr;": '\U0001D4BB',
+ "gE;": '\U00002267',
+ "gEl;": '\U00002A8C',
+ "gacute;": '\U000001F5',
+ "gamma;": '\U000003B3',
+ "gammad;": '\U000003DD',
+ "gap;": '\U00002A86',
+ "gbreve;": '\U0000011F',
+ "gcirc;": '\U0000011D',
+ "gcy;": '\U00000433',
+ "gdot;": '\U00000121',
+ "ge;": '\U00002265',
+ "gel;": '\U000022DB',
+ "geq;": '\U00002265',
+ "geqq;": '\U00002267',
+ "geqslant;": '\U00002A7E',
+ "ges;": '\U00002A7E',
+ "gescc;": '\U00002AA9',
+ "gesdot;": '\U00002A80',
+ "gesdoto;": '\U00002A82',
+ "gesdotol;": '\U00002A84',
+ "gesles;": '\U00002A94',
+ "gfr;": '\U0001D524',
+ "gg;": '\U0000226B',
+ "ggg;": '\U000022D9',
+ "gimel;": '\U00002137',
+ "gjcy;": '\U00000453',
+ "gl;": '\U00002277',
+ "glE;": '\U00002A92',
+ "gla;": '\U00002AA5',
+ "glj;": '\U00002AA4',
+ "gnE;": '\U00002269',
+ "gnap;": '\U00002A8A',
+ "gnapprox;": '\U00002A8A',
+ "gne;": '\U00002A88',
+ "gneq;": '\U00002A88',
+ "gneqq;": '\U00002269',
+ "gnsim;": '\U000022E7',
+ "gopf;": '\U0001D558',
+ "grave;": '\U00000060',
+ "gscr;": '\U0000210A',
+ "gsim;": '\U00002273',
+ "gsime;": '\U00002A8E',
+ "gsiml;": '\U00002A90',
+ "gt;": '\U0000003E',
+ "gtcc;": '\U00002AA7',
+ "gtcir;": '\U00002A7A',
+ "gtdot;": '\U000022D7',
+ "gtlPar;": '\U00002995',
+ "gtquest;": '\U00002A7C',
+ "gtrapprox;": '\U00002A86',
+ "gtrarr;": '\U00002978',
+ "gtrdot;": '\U000022D7',
+ "gtreqless;": '\U000022DB',
+ "gtreqqless;": '\U00002A8C',
+ "gtrless;": '\U00002277',
+ "gtrsim;": '\U00002273',
+ "hArr;": '\U000021D4',
+ "hairsp;": '\U0000200A',
+ "half;": '\U000000BD',
+ "hamilt;": '\U0000210B',
+ "hardcy;": '\U0000044A',
+ "harr;": '\U00002194',
+ "harrcir;": '\U00002948',
+ "harrw;": '\U000021AD',
+ "hbar;": '\U0000210F',
+ "hcirc;": '\U00000125',
+ "hearts;": '\U00002665',
+ "heartsuit;": '\U00002665',
+ "hellip;": '\U00002026',
+ "hercon;": '\U000022B9',
+ "hfr;": '\U0001D525',
+ "hksearow;": '\U00002925',
+ "hkswarow;": '\U00002926',
+ "hoarr;": '\U000021FF',
+ "homtht;": '\U0000223B',
+ "hookleftarrow;": '\U000021A9',
+ "hookrightarrow;": '\U000021AA',
+ "hopf;": '\U0001D559',
+ "horbar;": '\U00002015',
+ "hscr;": '\U0001D4BD',
+ "hslash;": '\U0000210F',
+ "hstrok;": '\U00000127',
+ "hybull;": '\U00002043',
+ "hyphen;": '\U00002010',
+ "iacute;": '\U000000ED',
+ "ic;": '\U00002063',
+ "icirc;": '\U000000EE',
+ "icy;": '\U00000438',
+ "iecy;": '\U00000435',
+ "iexcl;": '\U000000A1',
+ "iff;": '\U000021D4',
+ "ifr;": '\U0001D526',
+ "igrave;": '\U000000EC',
+ "ii;": '\U00002148',
+ "iiiint;": '\U00002A0C',
+ "iiint;": '\U0000222D',
+ "iinfin;": '\U000029DC',
+ "iiota;": '\U00002129',
+ "ijlig;": '\U00000133',
+ "imacr;": '\U0000012B',
+ "image;": '\U00002111',
+ "imagline;": '\U00002110',
+ "imagpart;": '\U00002111',
+ "imath;": '\U00000131',
+ "imof;": '\U000022B7',
+ "imped;": '\U000001B5',
+ "in;": '\U00002208',
+ "incare;": '\U00002105',
+ "infin;": '\U0000221E',
+ "infintie;": '\U000029DD',
+ "inodot;": '\U00000131',
+ "int;": '\U0000222B',
+ "intcal;": '\U000022BA',
+ "integers;": '\U00002124',
+ "intercal;": '\U000022BA',
+ "intlarhk;": '\U00002A17',
+ "intprod;": '\U00002A3C',
+ "iocy;": '\U00000451',
+ "iogon;": '\U0000012F',
+ "iopf;": '\U0001D55A',
+ "iota;": '\U000003B9',
+ "iprod;": '\U00002A3C',
+ "iquest;": '\U000000BF',
+ "iscr;": '\U0001D4BE',
+ "isin;": '\U00002208',
+ "isinE;": '\U000022F9',
+ "isindot;": '\U000022F5',
+ "isins;": '\U000022F4',
+ "isinsv;": '\U000022F3',
+ "isinv;": '\U00002208',
+ "it;": '\U00002062',
+ "itilde;": '\U00000129',
+ "iukcy;": '\U00000456',
+ "iuml;": '\U000000EF',
+ "jcirc;": '\U00000135',
+ "jcy;": '\U00000439',
+ "jfr;": '\U0001D527',
+ "jmath;": '\U00000237',
+ "jopf;": '\U0001D55B',
+ "jscr;": '\U0001D4BF',
+ "jsercy;": '\U00000458',
+ "jukcy;": '\U00000454',
+ "kappa;": '\U000003BA',
+ "kappav;": '\U000003F0',
+ "kcedil;": '\U00000137',
+ "kcy;": '\U0000043A',
+ "kfr;": '\U0001D528',
+ "kgreen;": '\U00000138',
+ "khcy;": '\U00000445',
+ "kjcy;": '\U0000045C',
+ "kopf;": '\U0001D55C',
+ "kscr;": '\U0001D4C0',
+ "lAarr;": '\U000021DA',
+ "lArr;": '\U000021D0',
+ "lAtail;": '\U0000291B',
+ "lBarr;": '\U0000290E',
+ "lE;": '\U00002266',
+ "lEg;": '\U00002A8B',
+ "lHar;": '\U00002962',
+ "lacute;": '\U0000013A',
+ "laemptyv;": '\U000029B4',
+ "lagran;": '\U00002112',
+ "lambda;": '\U000003BB',
+ "lang;": '\U000027E8',
+ "langd;": '\U00002991',
+ "langle;": '\U000027E8',
+ "lap;": '\U00002A85',
+ "laquo;": '\U000000AB',
+ "larr;": '\U00002190',
+ "larrb;": '\U000021E4',
+ "larrbfs;": '\U0000291F',
+ "larrfs;": '\U0000291D',
+ "larrhk;": '\U000021A9',
+ "larrlp;": '\U000021AB',
+ "larrpl;": '\U00002939',
+ "larrsim;": '\U00002973',
+ "larrtl;": '\U000021A2',
+ "lat;": '\U00002AAB',
+ "latail;": '\U00002919',
+ "late;": '\U00002AAD',
+ "lbarr;": '\U0000290C',
+ "lbbrk;": '\U00002772',
+ "lbrace;": '\U0000007B',
+ "lbrack;": '\U0000005B',
+ "lbrke;": '\U0000298B',
+ "lbrksld;": '\U0000298F',
+ "lbrkslu;": '\U0000298D',
+ "lcaron;": '\U0000013E',
+ "lcedil;": '\U0000013C',
+ "lceil;": '\U00002308',
+ "lcub;": '\U0000007B',
+ "lcy;": '\U0000043B',
+ "ldca;": '\U00002936',
+ "ldquo;": '\U0000201C',
+ "ldquor;": '\U0000201E',
+ "ldrdhar;": '\U00002967',
+ "ldrushar;": '\U0000294B',
+ "ldsh;": '\U000021B2',
+ "le;": '\U00002264',
+ "leftarrow;": '\U00002190',
+ "leftarrowtail;": '\U000021A2',
+ "leftharpoondown;": '\U000021BD',
+ "leftharpoonup;": '\U000021BC',
+ "leftleftarrows;": '\U000021C7',
+ "leftrightarrow;": '\U00002194',
+ "leftrightarrows;": '\U000021C6',
+ "leftrightharpoons;": '\U000021CB',
+ "leftrightsquigarrow;": '\U000021AD',
+ "leftthreetimes;": '\U000022CB',
+ "leg;": '\U000022DA',
+ "leq;": '\U00002264',
+ "leqq;": '\U00002266',
+ "leqslant;": '\U00002A7D',
+ "les;": '\U00002A7D',
+ "lescc;": '\U00002AA8',
+ "lesdot;": '\U00002A7F',
+ "lesdoto;": '\U00002A81',
+ "lesdotor;": '\U00002A83',
+ "lesges;": '\U00002A93',
+ "lessapprox;": '\U00002A85',
+ "lessdot;": '\U000022D6',
+ "lesseqgtr;": '\U000022DA',
+ "lesseqqgtr;": '\U00002A8B',
+ "lessgtr;": '\U00002276',
+ "lesssim;": '\U00002272',
+ "lfisht;": '\U0000297C',
+ "lfloor;": '\U0000230A',
+ "lfr;": '\U0001D529',
+ "lg;": '\U00002276',
+ "lgE;": '\U00002A91',
+ "lhard;": '\U000021BD',
+ "lharu;": '\U000021BC',
+ "lharul;": '\U0000296A',
+ "lhblk;": '\U00002584',
+ "ljcy;": '\U00000459',
+ "ll;": '\U0000226A',
+ "llarr;": '\U000021C7',
+ "llcorner;": '\U0000231E',
+ "llhard;": '\U0000296B',
+ "lltri;": '\U000025FA',
+ "lmidot;": '\U00000140',
+ "lmoust;": '\U000023B0',
+ "lmoustache;": '\U000023B0',
+ "lnE;": '\U00002268',
+ "lnap;": '\U00002A89',
+ "lnapprox;": '\U00002A89',
+ "lne;": '\U00002A87',
+ "lneq;": '\U00002A87',
+ "lneqq;": '\U00002268',
+ "lnsim;": '\U000022E6',
+ "loang;": '\U000027EC',
+ "loarr;": '\U000021FD',
+ "lobrk;": '\U000027E6',
+ "longleftarrow;": '\U000027F5',
+ "longleftrightarrow;": '\U000027F7',
+ "longmapsto;": '\U000027FC',
+ "longrightarrow;": '\U000027F6',
+ "looparrowleft;": '\U000021AB',
+ "looparrowright;": '\U000021AC',
+ "lopar;": '\U00002985',
+ "lopf;": '\U0001D55D',
+ "loplus;": '\U00002A2D',
+ "lotimes;": '\U00002A34',
+ "lowast;": '\U00002217',
+ "lowbar;": '\U0000005F',
+ "loz;": '\U000025CA',
+ "lozenge;": '\U000025CA',
+ "lozf;": '\U000029EB',
+ "lpar;": '\U00000028',
+ "lparlt;": '\U00002993',
+ "lrarr;": '\U000021C6',
+ "lrcorner;": '\U0000231F',
+ "lrhar;": '\U000021CB',
+ "lrhard;": '\U0000296D',
+ "lrm;": '\U0000200E',
+ "lrtri;": '\U000022BF',
+ "lsaquo;": '\U00002039',
+ "lscr;": '\U0001D4C1',
+ "lsh;": '\U000021B0',
+ "lsim;": '\U00002272',
+ "lsime;": '\U00002A8D',
+ "lsimg;": '\U00002A8F',
+ "lsqb;": '\U0000005B',
+ "lsquo;": '\U00002018',
+ "lsquor;": '\U0000201A',
+ "lstrok;": '\U00000142',
+ "lt;": '\U0000003C',
+ "ltcc;": '\U00002AA6',
+ "ltcir;": '\U00002A79',
+ "ltdot;": '\U000022D6',
+ "lthree;": '\U000022CB',
+ "ltimes;": '\U000022C9',
+ "ltlarr;": '\U00002976',
+ "ltquest;": '\U00002A7B',
+ "ltrPar;": '\U00002996',
+ "ltri;": '\U000025C3',
+ "ltrie;": '\U000022B4',
+ "ltrif;": '\U000025C2',
+ "lurdshar;": '\U0000294A',
+ "luruhar;": '\U00002966',
+ "mDDot;": '\U0000223A',
+ "macr;": '\U000000AF',
+ "male;": '\U00002642',
+ "malt;": '\U00002720',
+ "maltese;": '\U00002720',
+ "map;": '\U000021A6',
+ "mapsto;": '\U000021A6',
+ "mapstodown;": '\U000021A7',
+ "mapstoleft;": '\U000021A4',
+ "mapstoup;": '\U000021A5',
+ "marker;": '\U000025AE',
+ "mcomma;": '\U00002A29',
+ "mcy;": '\U0000043C',
+ "mdash;": '\U00002014',
+ "measuredangle;": '\U00002221',
+ "mfr;": '\U0001D52A',
+ "mho;": '\U00002127',
+ "micro;": '\U000000B5',
+ "mid;": '\U00002223',
+ "midast;": '\U0000002A',
+ "midcir;": '\U00002AF0',
+ "middot;": '\U000000B7',
+ "minus;": '\U00002212',
+ "minusb;": '\U0000229F',
+ "minusd;": '\U00002238',
+ "minusdu;": '\U00002A2A',
+ "mlcp;": '\U00002ADB',
+ "mldr;": '\U00002026',
+ "mnplus;": '\U00002213',
+ "models;": '\U000022A7',
+ "mopf;": '\U0001D55E',
+ "mp;": '\U00002213',
+ "mscr;": '\U0001D4C2',
+ "mstpos;": '\U0000223E',
+ "mu;": '\U000003BC',
+ "multimap;": '\U000022B8',
+ "mumap;": '\U000022B8',
+ "nLeftarrow;": '\U000021CD',
+ "nLeftrightarrow;": '\U000021CE',
+ "nRightarrow;": '\U000021CF',
+ "nVDash;": '\U000022AF',
+ "nVdash;": '\U000022AE',
+ "nabla;": '\U00002207',
+ "nacute;": '\U00000144',
+ "nap;": '\U00002249',
+ "napos;": '\U00000149',
+ "napprox;": '\U00002249',
+ "natur;": '\U0000266E',
+ "natural;": '\U0000266E',
+ "naturals;": '\U00002115',
+ "nbsp;": '\U000000A0',
+ "ncap;": '\U00002A43',
+ "ncaron;": '\U00000148',
+ "ncedil;": '\U00000146',
+ "ncong;": '\U00002247',
+ "ncup;": '\U00002A42',
+ "ncy;": '\U0000043D',
+ "ndash;": '\U00002013',
+ "ne;": '\U00002260',
+ "neArr;": '\U000021D7',
+ "nearhk;": '\U00002924',
+ "nearr;": '\U00002197',
+ "nearrow;": '\U00002197',
+ "nequiv;": '\U00002262',
+ "nesear;": '\U00002928',
+ "nexist;": '\U00002204',
+ "nexists;": '\U00002204',
+ "nfr;": '\U0001D52B',
+ "nge;": '\U00002271',
+ "ngeq;": '\U00002271',
+ "ngsim;": '\U00002275',
+ "ngt;": '\U0000226F',
+ "ngtr;": '\U0000226F',
+ "nhArr;": '\U000021CE',
+ "nharr;": '\U000021AE',
+ "nhpar;": '\U00002AF2',
+ "ni;": '\U0000220B',
+ "nis;": '\U000022FC',
+ "nisd;": '\U000022FA',
+ "niv;": '\U0000220B',
+ "njcy;": '\U0000045A',
+ "nlArr;": '\U000021CD',
+ "nlarr;": '\U0000219A',
+ "nldr;": '\U00002025',
+ "nle;": '\U00002270',
+ "nleftarrow;": '\U0000219A',
+ "nleftrightarrow;": '\U000021AE',
+ "nleq;": '\U00002270',
+ "nless;": '\U0000226E',
+ "nlsim;": '\U00002274',
+ "nlt;": '\U0000226E',
+ "nltri;": '\U000022EA',
+ "nltrie;": '\U000022EC',
+ "nmid;": '\U00002224',
+ "nopf;": '\U0001D55F',
+ "not;": '\U000000AC',
+ "notin;": '\U00002209',
+ "notinva;": '\U00002209',
+ "notinvb;": '\U000022F7',
+ "notinvc;": '\U000022F6',
+ "notni;": '\U0000220C',
+ "notniva;": '\U0000220C',
+ "notnivb;": '\U000022FE',
+ "notnivc;": '\U000022FD',
+ "npar;": '\U00002226',
+ "nparallel;": '\U00002226',
+ "npolint;": '\U00002A14',
+ "npr;": '\U00002280',
+ "nprcue;": '\U000022E0',
+ "nprec;": '\U00002280',
+ "nrArr;": '\U000021CF',
+ "nrarr;": '\U0000219B',
+ "nrightarrow;": '\U0000219B',
+ "nrtri;": '\U000022EB',
+ "nrtrie;": '\U000022ED',
+ "nsc;": '\U00002281',
+ "nsccue;": '\U000022E1',
+ "nscr;": '\U0001D4C3',
+ "nshortmid;": '\U00002224',
+ "nshortparallel;": '\U00002226',
+ "nsim;": '\U00002241',
+ "nsime;": '\U00002244',
+ "nsimeq;": '\U00002244',
+ "nsmid;": '\U00002224',
+ "nspar;": '\U00002226',
+ "nsqsube;": '\U000022E2',
+ "nsqsupe;": '\U000022E3',
+ "nsub;": '\U00002284',
+ "nsube;": '\U00002288',
+ "nsubseteq;": '\U00002288',
+ "nsucc;": '\U00002281',
+ "nsup;": '\U00002285',
+ "nsupe;": '\U00002289',
+ "nsupseteq;": '\U00002289',
+ "ntgl;": '\U00002279',
+ "ntilde;": '\U000000F1',
+ "ntlg;": '\U00002278',
+ "ntriangleleft;": '\U000022EA',
+ "ntrianglelefteq;": '\U000022EC',
+ "ntriangleright;": '\U000022EB',
+ "ntrianglerighteq;": '\U000022ED',
+ "nu;": '\U000003BD',
+ "num;": '\U00000023',
+ "numero;": '\U00002116',
+ "numsp;": '\U00002007',
+ "nvDash;": '\U000022AD',
+ "nvHarr;": '\U00002904',
+ "nvdash;": '\U000022AC',
+ "nvinfin;": '\U000029DE',
+ "nvlArr;": '\U00002902',
+ "nvrArr;": '\U00002903',
+ "nwArr;": '\U000021D6',
+ "nwarhk;": '\U00002923',
+ "nwarr;": '\U00002196',
+ "nwarrow;": '\U00002196',
+ "nwnear;": '\U00002927',
+ "oS;": '\U000024C8',
+ "oacute;": '\U000000F3',
+ "oast;": '\U0000229B',
+ "ocir;": '\U0000229A',
+ "ocirc;": '\U000000F4',
+ "ocy;": '\U0000043E',
+ "odash;": '\U0000229D',
+ "odblac;": '\U00000151',
+ "odiv;": '\U00002A38',
+ "odot;": '\U00002299',
+ "odsold;": '\U000029BC',
+ "oelig;": '\U00000153',
+ "ofcir;": '\U000029BF',
+ "ofr;": '\U0001D52C',
+ "ogon;": '\U000002DB',
+ "ograve;": '\U000000F2',
+ "ogt;": '\U000029C1',
+ "ohbar;": '\U000029B5',
+ "ohm;": '\U000003A9',
+ "oint;": '\U0000222E',
+ "olarr;": '\U000021BA',
+ "olcir;": '\U000029BE',
+ "olcross;": '\U000029BB',
+ "oline;": '\U0000203E',
+ "olt;": '\U000029C0',
+ "omacr;": '\U0000014D',
+ "omega;": '\U000003C9',
+ "omicron;": '\U000003BF',
+ "omid;": '\U000029B6',
+ "ominus;": '\U00002296',
+ "oopf;": '\U0001D560',
+ "opar;": '\U000029B7',
+ "operp;": '\U000029B9',
+ "oplus;": '\U00002295',
+ "or;": '\U00002228',
+ "orarr;": '\U000021BB',
+ "ord;": '\U00002A5D',
+ "order;": '\U00002134',
+ "orderof;": '\U00002134',
+ "ordf;": '\U000000AA',
+ "ordm;": '\U000000BA',
+ "origof;": '\U000022B6',
+ "oror;": '\U00002A56',
+ "orslope;": '\U00002A57',
+ "orv;": '\U00002A5B',
+ "oscr;": '\U00002134',
+ "oslash;": '\U000000F8',
+ "osol;": '\U00002298',
+ "otilde;": '\U000000F5',
+ "otimes;": '\U00002297',
+ "otimesas;": '\U00002A36',
+ "ouml;": '\U000000F6',
+ "ovbar;": '\U0000233D',
+ "par;": '\U00002225',
+ "para;": '\U000000B6',
+ "parallel;": '\U00002225',
+ "parsim;": '\U00002AF3',
+ "parsl;": '\U00002AFD',
+ "part;": '\U00002202',
+ "pcy;": '\U0000043F',
+ "percnt;": '\U00000025',
+ "period;": '\U0000002E',
+ "permil;": '\U00002030',
+ "perp;": '\U000022A5',
+ "pertenk;": '\U00002031',
+ "pfr;": '\U0001D52D',
+ "phi;": '\U000003C6',
+ "phiv;": '\U000003D5',
+ "phmmat;": '\U00002133',
+ "phone;": '\U0000260E',
+ "pi;": '\U000003C0',
+ "pitchfork;": '\U000022D4',
+ "piv;": '\U000003D6',
+ "planck;": '\U0000210F',
+ "planckh;": '\U0000210E',
+ "plankv;": '\U0000210F',
+ "plus;": '\U0000002B',
+ "plusacir;": '\U00002A23',
+ "plusb;": '\U0000229E',
+ "pluscir;": '\U00002A22',
+ "plusdo;": '\U00002214',
+ "plusdu;": '\U00002A25',
+ "pluse;": '\U00002A72',
+ "plusmn;": '\U000000B1',
+ "plussim;": '\U00002A26',
+ "plustwo;": '\U00002A27',
+ "pm;": '\U000000B1',
+ "pointint;": '\U00002A15',
+ "popf;": '\U0001D561',
+ "pound;": '\U000000A3',
+ "pr;": '\U0000227A',
+ "prE;": '\U00002AB3',
+ "prap;": '\U00002AB7',
+ "prcue;": '\U0000227C',
+ "pre;": '\U00002AAF',
+ "prec;": '\U0000227A',
+ "precapprox;": '\U00002AB7',
+ "preccurlyeq;": '\U0000227C',
+ "preceq;": '\U00002AAF',
+ "precnapprox;": '\U00002AB9',
+ "precneqq;": '\U00002AB5',
+ "precnsim;": '\U000022E8',
+ "precsim;": '\U0000227E',
+ "prime;": '\U00002032',
+ "primes;": '\U00002119',
+ "prnE;": '\U00002AB5',
+ "prnap;": '\U00002AB9',
+ "prnsim;": '\U000022E8',
+ "prod;": '\U0000220F',
+ "profalar;": '\U0000232E',
+ "profline;": '\U00002312',
+ "profsurf;": '\U00002313',
+ "prop;": '\U0000221D',
+ "propto;": '\U0000221D',
+ "prsim;": '\U0000227E',
+ "prurel;": '\U000022B0',
+ "pscr;": '\U0001D4C5',
+ "psi;": '\U000003C8',
+ "puncsp;": '\U00002008',
+ "qfr;": '\U0001D52E',
+ "qint;": '\U00002A0C',
+ "qopf;": '\U0001D562',
+ "qprime;": '\U00002057',
+ "qscr;": '\U0001D4C6',
+ "quaternions;": '\U0000210D',
+ "quatint;": '\U00002A16',
+ "quest;": '\U0000003F',
+ "questeq;": '\U0000225F',
+ "quot;": '\U00000022',
+ "rAarr;": '\U000021DB',
+ "rArr;": '\U000021D2',
+ "rAtail;": '\U0000291C',
+ "rBarr;": '\U0000290F',
+ "rHar;": '\U00002964',
+ "racute;": '\U00000155',
+ "radic;": '\U0000221A',
+ "raemptyv;": '\U000029B3',
+ "rang;": '\U000027E9',
+ "rangd;": '\U00002992',
+ "range;": '\U000029A5',
+ "rangle;": '\U000027E9',
+ "raquo;": '\U000000BB',
+ "rarr;": '\U00002192',
+ "rarrap;": '\U00002975',
+ "rarrb;": '\U000021E5',
+ "rarrbfs;": '\U00002920',
+ "rarrc;": '\U00002933',
+ "rarrfs;": '\U0000291E',
+ "rarrhk;": '\U000021AA',
+ "rarrlp;": '\U000021AC',
+ "rarrpl;": '\U00002945',
+ "rarrsim;": '\U00002974',
+ "rarrtl;": '\U000021A3',
+ "rarrw;": '\U0000219D',
+ "ratail;": '\U0000291A',
+ "ratio;": '\U00002236',
+ "rationals;": '\U0000211A',
+ "rbarr;": '\U0000290D',
+ "rbbrk;": '\U00002773',
+ "rbrace;": '\U0000007D',
+ "rbrack;": '\U0000005D',
+ "rbrke;": '\U0000298C',
+ "rbrksld;": '\U0000298E',
+ "rbrkslu;": '\U00002990',
+ "rcaron;": '\U00000159',
+ "rcedil;": '\U00000157',
+ "rceil;": '\U00002309',
+ "rcub;": '\U0000007D',
+ "rcy;": '\U00000440',
+ "rdca;": '\U00002937',
+ "rdldhar;": '\U00002969',
+ "rdquo;": '\U0000201D',
+ "rdquor;": '\U0000201D',
+ "rdsh;": '\U000021B3',
+ "real;": '\U0000211C',
+ "realine;": '\U0000211B',
+ "realpart;": '\U0000211C',
+ "reals;": '\U0000211D',
+ "rect;": '\U000025AD',
+ "reg;": '\U000000AE',
+ "rfisht;": '\U0000297D',
+ "rfloor;": '\U0000230B',
+ "rfr;": '\U0001D52F',
+ "rhard;": '\U000021C1',
+ "rharu;": '\U000021C0',
+ "rharul;": '\U0000296C',
+ "rho;": '\U000003C1',
+ "rhov;": '\U000003F1',
+ "rightarrow;": '\U00002192',
+ "rightarrowtail;": '\U000021A3',
+ "rightharpoondown;": '\U000021C1',
+ "rightharpoonup;": '\U000021C0',
+ "rightleftarrows;": '\U000021C4',
+ "rightleftharpoons;": '\U000021CC',
+ "rightrightarrows;": '\U000021C9',
+ "rightsquigarrow;": '\U0000219D',
+ "rightthreetimes;": '\U000022CC',
+ "ring;": '\U000002DA',
+ "risingdotseq;": '\U00002253',
+ "rlarr;": '\U000021C4',
+ "rlhar;": '\U000021CC',
+ "rlm;": '\U0000200F',
+ "rmoust;": '\U000023B1',
+ "rmoustache;": '\U000023B1',
+ "rnmid;": '\U00002AEE',
+ "roang;": '\U000027ED',
+ "roarr;": '\U000021FE',
+ "robrk;": '\U000027E7',
+ "ropar;": '\U00002986',
+ "ropf;": '\U0001D563',
+ "roplus;": '\U00002A2E',
+ "rotimes;": '\U00002A35',
+ "rpar;": '\U00000029',
+ "rpargt;": '\U00002994',
+ "rppolint;": '\U00002A12',
+ "rrarr;": '\U000021C9',
+ "rsaquo;": '\U0000203A',
+ "rscr;": '\U0001D4C7',
+ "rsh;": '\U000021B1',
+ "rsqb;": '\U0000005D',
+ "rsquo;": '\U00002019',
+ "rsquor;": '\U00002019',
+ "rthree;": '\U000022CC',
+ "rtimes;": '\U000022CA',
+ "rtri;": '\U000025B9',
+ "rtrie;": '\U000022B5',
+ "rtrif;": '\U000025B8',
+ "rtriltri;": '\U000029CE',
+ "ruluhar;": '\U00002968',
+ "rx;": '\U0000211E',
+ "sacute;": '\U0000015B',
+ "sbquo;": '\U0000201A',
+ "sc;": '\U0000227B',
+ "scE;": '\U00002AB4',
+ "scap;": '\U00002AB8',
+ "scaron;": '\U00000161',
+ "sccue;": '\U0000227D',
+ "sce;": '\U00002AB0',
+ "scedil;": '\U0000015F',
+ "scirc;": '\U0000015D',
+ "scnE;": '\U00002AB6',
+ "scnap;": '\U00002ABA',
+ "scnsim;": '\U000022E9',
+ "scpolint;": '\U00002A13',
+ "scsim;": '\U0000227F',
+ "scy;": '\U00000441',
+ "sdot;": '\U000022C5',
+ "sdotb;": '\U000022A1',
+ "sdote;": '\U00002A66',
+ "seArr;": '\U000021D8',
+ "searhk;": '\U00002925',
+ "searr;": '\U00002198',
+ "searrow;": '\U00002198',
+ "sect;": '\U000000A7',
+ "semi;": '\U0000003B',
+ "seswar;": '\U00002929',
+ "setminus;": '\U00002216',
+ "setmn;": '\U00002216',
+ "sext;": '\U00002736',
+ "sfr;": '\U0001D530',
+ "sfrown;": '\U00002322',
+ "sharp;": '\U0000266F',
+ "shchcy;": '\U00000449',
+ "shcy;": '\U00000448',
+ "shortmid;": '\U00002223',
+ "shortparallel;": '\U00002225',
+ "shy;": '\U000000AD',
+ "sigma;": '\U000003C3',
+ "sigmaf;": '\U000003C2',
+ "sigmav;": '\U000003C2',
+ "sim;": '\U0000223C',
+ "simdot;": '\U00002A6A',
+ "sime;": '\U00002243',
+ "simeq;": '\U00002243',
+ "simg;": '\U00002A9E',
+ "simgE;": '\U00002AA0',
+ "siml;": '\U00002A9D',
+ "simlE;": '\U00002A9F',
+ "simne;": '\U00002246',
+ "simplus;": '\U00002A24',
+ "simrarr;": '\U00002972',
+ "slarr;": '\U00002190',
+ "smallsetminus;": '\U00002216',
+ "smashp;": '\U00002A33',
+ "smeparsl;": '\U000029E4',
+ "smid;": '\U00002223',
+ "smile;": '\U00002323',
+ "smt;": '\U00002AAA',
+ "smte;": '\U00002AAC',
+ "softcy;": '\U0000044C',
+ "sol;": '\U0000002F',
+ "solb;": '\U000029C4',
+ "solbar;": '\U0000233F',
+ "sopf;": '\U0001D564',
+ "spades;": '\U00002660',
+ "spadesuit;": '\U00002660',
+ "spar;": '\U00002225',
+ "sqcap;": '\U00002293',
+ "sqcup;": '\U00002294',
+ "sqsub;": '\U0000228F',
+ "sqsube;": '\U00002291',
+ "sqsubset;": '\U0000228F',
+ "sqsubseteq;": '\U00002291',
+ "sqsup;": '\U00002290',
+ "sqsupe;": '\U00002292',
+ "sqsupset;": '\U00002290',
+ "sqsupseteq;": '\U00002292',
+ "squ;": '\U000025A1',
+ "square;": '\U000025A1',
+ "squarf;": '\U000025AA',
+ "squf;": '\U000025AA',
+ "srarr;": '\U00002192',
+ "sscr;": '\U0001D4C8',
+ "ssetmn;": '\U00002216',
+ "ssmile;": '\U00002323',
+ "sstarf;": '\U000022C6',
+ "star;": '\U00002606',
+ "starf;": '\U00002605',
+ "straightepsilon;": '\U000003F5',
+ "straightphi;": '\U000003D5',
+ "strns;": '\U000000AF',
+ "sub;": '\U00002282',
+ "subE;": '\U00002AC5',
+ "subdot;": '\U00002ABD',
+ "sube;": '\U00002286',
+ "subedot;": '\U00002AC3',
+ "submult;": '\U00002AC1',
+ "subnE;": '\U00002ACB',
+ "subne;": '\U0000228A',
+ "subplus;": '\U00002ABF',
+ "subrarr;": '\U00002979',
+ "subset;": '\U00002282',
+ "subseteq;": '\U00002286',
+ "subseteqq;": '\U00002AC5',
+ "subsetneq;": '\U0000228A',
+ "subsetneqq;": '\U00002ACB',
+ "subsim;": '\U00002AC7',
+ "subsub;": '\U00002AD5',
+ "subsup;": '\U00002AD3',
+ "succ;": '\U0000227B',
+ "succapprox;": '\U00002AB8',
+ "succcurlyeq;": '\U0000227D',
+ "succeq;": '\U00002AB0',
+ "succnapprox;": '\U00002ABA',
+ "succneqq;": '\U00002AB6',
+ "succnsim;": '\U000022E9',
+ "succsim;": '\U0000227F',
+ "sum;": '\U00002211',
+ "sung;": '\U0000266A',
+ "sup;": '\U00002283',
+ "sup1;": '\U000000B9',
+ "sup2;": '\U000000B2',
+ "sup3;": '\U000000B3',
+ "supE;": '\U00002AC6',
+ "supdot;": '\U00002ABE',
+ "supdsub;": '\U00002AD8',
+ "supe;": '\U00002287',
+ "supedot;": '\U00002AC4',
+ "suphsol;": '\U000027C9',
+ "suphsub;": '\U00002AD7',
+ "suplarr;": '\U0000297B',
+ "supmult;": '\U00002AC2',
+ "supnE;": '\U00002ACC',
+ "supne;": '\U0000228B',
+ "supplus;": '\U00002AC0',
+ "supset;": '\U00002283',
+ "supseteq;": '\U00002287',
+ "supseteqq;": '\U00002AC6',
+ "supsetneq;": '\U0000228B',
+ "supsetneqq;": '\U00002ACC',
+ "supsim;": '\U00002AC8',
+ "supsub;": '\U00002AD4',
+ "supsup;": '\U00002AD6',
+ "swArr;": '\U000021D9',
+ "swarhk;": '\U00002926',
+ "swarr;": '\U00002199',
+ "swarrow;": '\U00002199',
+ "swnwar;": '\U0000292A',
+ "szlig;": '\U000000DF',
+ "target;": '\U00002316',
+ "tau;": '\U000003C4',
+ "tbrk;": '\U000023B4',
+ "tcaron;": '\U00000165',
+ "tcedil;": '\U00000163',
+ "tcy;": '\U00000442',
+ "tdot;": '\U000020DB',
+ "telrec;": '\U00002315',
+ "tfr;": '\U0001D531',
+ "there4;": '\U00002234',
+ "therefore;": '\U00002234',
+ "theta;": '\U000003B8',
+ "thetasym;": '\U000003D1',
+ "thetav;": '\U000003D1',
+ "thickapprox;": '\U00002248',
+ "thicksim;": '\U0000223C',
+ "thinsp;": '\U00002009',
+ "thkap;": '\U00002248',
+ "thksim;": '\U0000223C',
+ "thorn;": '\U000000FE',
+ "tilde;": '\U000002DC',
+ "times;": '\U000000D7',
+ "timesb;": '\U000022A0',
+ "timesbar;": '\U00002A31',
+ "timesd;": '\U00002A30',
+ "tint;": '\U0000222D',
+ "toea;": '\U00002928',
+ "top;": '\U000022A4',
+ "topbot;": '\U00002336',
+ "topcir;": '\U00002AF1',
+ "topf;": '\U0001D565',
+ "topfork;": '\U00002ADA',
+ "tosa;": '\U00002929',
+ "tprime;": '\U00002034',
+ "trade;": '\U00002122',
+ "triangle;": '\U000025B5',
+ "triangledown;": '\U000025BF',
+ "triangleleft;": '\U000025C3',
+ "trianglelefteq;": '\U000022B4',
+ "triangleq;": '\U0000225C',
+ "triangleright;": '\U000025B9',
+ "trianglerighteq;": '\U000022B5',
+ "tridot;": '\U000025EC',
+ "trie;": '\U0000225C',
+ "triminus;": '\U00002A3A',
+ "triplus;": '\U00002A39',
+ "trisb;": '\U000029CD',
+ "tritime;": '\U00002A3B',
+ "trpezium;": '\U000023E2',
+ "tscr;": '\U0001D4C9',
+ "tscy;": '\U00000446',
+ "tshcy;": '\U0000045B',
+ "tstrok;": '\U00000167',
+ "twixt;": '\U0000226C',
+ "twoheadleftarrow;": '\U0000219E',
+ "twoheadrightarrow;": '\U000021A0',
+ "uArr;": '\U000021D1',
+ "uHar;": '\U00002963',
+ "uacute;": '\U000000FA',
+ "uarr;": '\U00002191',
+ "ubrcy;": '\U0000045E',
+ "ubreve;": '\U0000016D',
+ "ucirc;": '\U000000FB',
+ "ucy;": '\U00000443',
+ "udarr;": '\U000021C5',
+ "udblac;": '\U00000171',
+ "udhar;": '\U0000296E',
+ "ufisht;": '\U0000297E',
+ "ufr;": '\U0001D532',
+ "ugrave;": '\U000000F9',
+ "uharl;": '\U000021BF',
+ "uharr;": '\U000021BE',
+ "uhblk;": '\U00002580',
+ "ulcorn;": '\U0000231C',
+ "ulcorner;": '\U0000231C',
+ "ulcrop;": '\U0000230F',
+ "ultri;": '\U000025F8',
+ "umacr;": '\U0000016B',
+ "uml;": '\U000000A8',
+ "uogon;": '\U00000173',
+ "uopf;": '\U0001D566',
+ "uparrow;": '\U00002191',
+ "updownarrow;": '\U00002195',
+ "upharpoonleft;": '\U000021BF',
+ "upharpoonright;": '\U000021BE',
+ "uplus;": '\U0000228E',
+ "upsi;": '\U000003C5',
+ "upsih;": '\U000003D2',
+ "upsilon;": '\U000003C5',
+ "upuparrows;": '\U000021C8',
+ "urcorn;": '\U0000231D',
+ "urcorner;": '\U0000231D',
+ "urcrop;": '\U0000230E',
+ "uring;": '\U0000016F',
+ "urtri;": '\U000025F9',
+ "uscr;": '\U0001D4CA',
+ "utdot;": '\U000022F0',
+ "utilde;": '\U00000169',
+ "utri;": '\U000025B5',
+ "utrif;": '\U000025B4',
+ "uuarr;": '\U000021C8',
+ "uuml;": '\U000000FC',
+ "uwangle;": '\U000029A7',
+ "vArr;": '\U000021D5',
+ "vBar;": '\U00002AE8',
+ "vBarv;": '\U00002AE9',
+ "vDash;": '\U000022A8',
+ "vangrt;": '\U0000299C',
+ "varepsilon;": '\U000003F5',
+ "varkappa;": '\U000003F0',
+ "varnothing;": '\U00002205',
+ "varphi;": '\U000003D5',
+ "varpi;": '\U000003D6',
+ "varpropto;": '\U0000221D',
+ "varr;": '\U00002195',
+ "varrho;": '\U000003F1',
+ "varsigma;": '\U000003C2',
+ "vartheta;": '\U000003D1',
+ "vartriangleleft;": '\U000022B2',
+ "vartriangleright;": '\U000022B3',
+ "vcy;": '\U00000432',
+ "vdash;": '\U000022A2',
+ "vee;": '\U00002228',
+ "veebar;": '\U000022BB',
+ "veeeq;": '\U0000225A',
+ "vellip;": '\U000022EE',
+ "verbar;": '\U0000007C',
+ "vert;": '\U0000007C',
+ "vfr;": '\U0001D533',
+ "vltri;": '\U000022B2',
+ "vopf;": '\U0001D567',
+ "vprop;": '\U0000221D',
+ "vrtri;": '\U000022B3',
+ "vscr;": '\U0001D4CB',
+ "vzigzag;": '\U0000299A',
+ "wcirc;": '\U00000175',
+ "wedbar;": '\U00002A5F',
+ "wedge;": '\U00002227',
+ "wedgeq;": '\U00002259',
+ "weierp;": '\U00002118',
+ "wfr;": '\U0001D534',
+ "wopf;": '\U0001D568',
+ "wp;": '\U00002118',
+ "wr;": '\U00002240',
+ "wreath;": '\U00002240',
+ "wscr;": '\U0001D4CC',
+ "xcap;": '\U000022C2',
+ "xcirc;": '\U000025EF',
+ "xcup;": '\U000022C3',
+ "xdtri;": '\U000025BD',
+ "xfr;": '\U0001D535',
+ "xhArr;": '\U000027FA',
+ "xharr;": '\U000027F7',
+ "xi;": '\U000003BE',
+ "xlArr;": '\U000027F8',
+ "xlarr;": '\U000027F5',
+ "xmap;": '\U000027FC',
+ "xnis;": '\U000022FB',
+ "xodot;": '\U00002A00',
+ "xopf;": '\U0001D569',
+ "xoplus;": '\U00002A01',
+ "xotime;": '\U00002A02',
+ "xrArr;": '\U000027F9',
+ "xrarr;": '\U000027F6',
+ "xscr;": '\U0001D4CD',
+ "xsqcup;": '\U00002A06',
+ "xuplus;": '\U00002A04',
+ "xutri;": '\U000025B3',
+ "xvee;": '\U000022C1',
+ "xwedge;": '\U000022C0',
+ "yacute;": '\U000000FD',
+ "yacy;": '\U0000044F',
+ "ycirc;": '\U00000177',
+ "ycy;": '\U0000044B',
+ "yen;": '\U000000A5',
+ "yfr;": '\U0001D536',
+ "yicy;": '\U00000457',
+ "yopf;": '\U0001D56A',
+ "yscr;": '\U0001D4CE',
+ "yucy;": '\U0000044E',
+ "yuml;": '\U000000FF',
+ "zacute;": '\U0000017A',
+ "zcaron;": '\U0000017E',
+ "zcy;": '\U00000437',
+ "zdot;": '\U0000017C',
+ "zeetrf;": '\U00002128',
+ "zeta;": '\U000003B6',
+ "zfr;": '\U0001D537',
+ "zhcy;": '\U00000436',
+ "zigrarr;": '\U000021DD',
+ "zopf;": '\U0001D56B',
+ "zscr;": '\U0001D4CF',
+ "zwj;": '\U0000200D',
+ "zwnj;": '\U0000200C',
+ "AElig": '\U000000C6',
+ "AMP": '\U00000026',
+ "Aacute": '\U000000C1',
+ "Acirc": '\U000000C2',
+ "Agrave": '\U000000C0',
+ "Aring": '\U000000C5',
+ "Atilde": '\U000000C3',
+ "Auml": '\U000000C4',
+ "COPY": '\U000000A9',
+ "Ccedil": '\U000000C7',
+ "ETH": '\U000000D0',
+ "Eacute": '\U000000C9',
+ "Ecirc": '\U000000CA',
+ "Egrave": '\U000000C8',
+ "Euml": '\U000000CB',
+ "GT": '\U0000003E',
+ "Iacute": '\U000000CD',
+ "Icirc": '\U000000CE',
+ "Igrave": '\U000000CC',
+ "Iuml": '\U000000CF',
+ "LT": '\U0000003C',
+ "Ntilde": '\U000000D1',
+ "Oacute": '\U000000D3',
+ "Ocirc": '\U000000D4',
+ "Ograve": '\U000000D2',
+ "Oslash": '\U000000D8',
+ "Otilde": '\U000000D5',
+ "Ouml": '\U000000D6',
+ "QUOT": '\U00000022',
+ "REG": '\U000000AE',
+ "THORN": '\U000000DE',
+ "Uacute": '\U000000DA',
+ "Ucirc": '\U000000DB',
+ "Ugrave": '\U000000D9',
+ "Uuml": '\U000000DC',
+ "Yacute": '\U000000DD',
+ "aacute": '\U000000E1',
+ "acirc": '\U000000E2',
+ "acute": '\U000000B4',
+ "aelig": '\U000000E6',
+ "agrave": '\U000000E0',
+ "amp": '\U00000026',
+ "aring": '\U000000E5',
+ "atilde": '\U000000E3',
+ "auml": '\U000000E4',
+ "brvbar": '\U000000A6',
+ "ccedil": '\U000000E7',
+ "cedil": '\U000000B8',
+ "cent": '\U000000A2',
+ "copy": '\U000000A9',
+ "curren": '\U000000A4',
+ "deg": '\U000000B0',
+ "divide": '\U000000F7',
+ "eacute": '\U000000E9',
+ "ecirc": '\U000000EA',
+ "egrave": '\U000000E8',
+ "eth": '\U000000F0',
+ "euml": '\U000000EB',
+ "frac12": '\U000000BD',
+ "frac14": '\U000000BC',
+ "frac34": '\U000000BE',
+ "gt": '\U0000003E',
+ "iacute": '\U000000ED',
+ "icirc": '\U000000EE',
+ "iexcl": '\U000000A1',
+ "igrave": '\U000000EC',
+ "iquest": '\U000000BF',
+ "iuml": '\U000000EF',
+ "laquo": '\U000000AB',
+ "lt": '\U0000003C',
+ "macr": '\U000000AF',
+ "micro": '\U000000B5',
+ "middot": '\U000000B7',
+ "nbsp": '\U000000A0',
+ "not": '\U000000AC',
+ "ntilde": '\U000000F1',
+ "oacute": '\U000000F3',
+ "ocirc": '\U000000F4',
+ "ograve": '\U000000F2',
+ "ordf": '\U000000AA',
+ "ordm": '\U000000BA',
+ "oslash": '\U000000F8',
+ "otilde": '\U000000F5',
+ "ouml": '\U000000F6',
+ "para": '\U000000B6',
+ "plusmn": '\U000000B1',
+ "pound": '\U000000A3',
+ "quot": '\U00000022',
+ "raquo": '\U000000BB',
+ "reg": '\U000000AE',
+ "sect": '\U000000A7',
+ "shy": '\U000000AD',
+ "sup1": '\U000000B9',
+ "sup2": '\U000000B2',
+ "sup3": '\U000000B3',
+ "szlig": '\U000000DF',
+ "thorn": '\U000000FE',
+ "times": '\U000000D7',
+ "uacute": '\U000000FA',
+ "ucirc": '\U000000FB',
+ "ugrave": '\U000000F9',
+ "uml": '\U000000A8',
+ "uuml": '\U000000FC',
+ "yacute": '\U000000FD',
+ "yen": '\U000000A5',
+ "yuml": '\U000000FF',
+}
+
+// HTML entities that are two unicode codepoints.
+var entity2 = map[string][2]rune{
+ // TODO(nigeltao): Handle replacements that are wider than their names.
+ // "nLt;": {'\u226A', '\u20D2'},
+ // "nGt;": {'\u226B', '\u20D2'},
+ "NotEqualTilde;": {'\u2242', '\u0338'},
+ "NotGreaterFullEqual;": {'\u2267', '\u0338'},
+ "NotGreaterGreater;": {'\u226B', '\u0338'},
+ "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'},
+ "NotHumpDownHump;": {'\u224E', '\u0338'},
+ "NotHumpEqual;": {'\u224F', '\u0338'},
+ "NotLeftTriangleBar;": {'\u29CF', '\u0338'},
+ "NotLessLess;": {'\u226A', '\u0338'},
+ "NotLessSlantEqual;": {'\u2A7D', '\u0338'},
+ "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'},
+ "NotNestedLessLess;": {'\u2AA1', '\u0338'},
+ "NotPrecedesEqual;": {'\u2AAF', '\u0338'},
+ "NotRightTriangleBar;": {'\u29D0', '\u0338'},
+ "NotSquareSubset;": {'\u228F', '\u0338'},
+ "NotSquareSuperset;": {'\u2290', '\u0338'},
+ "NotSubset;": {'\u2282', '\u20D2'},
+ "NotSucceedsEqual;": {'\u2AB0', '\u0338'},
+ "NotSucceedsTilde;": {'\u227F', '\u0338'},
+ "NotSuperset;": {'\u2283', '\u20D2'},
+ "ThickSpace;": {'\u205F', '\u200A'},
+ "acE;": {'\u223E', '\u0333'},
+ "bne;": {'\u003D', '\u20E5'},
+ "bnequiv;": {'\u2261', '\u20E5'},
+ "caps;": {'\u2229', '\uFE00'},
+ "cups;": {'\u222A', '\uFE00'},
+ "fjlig;": {'\u0066', '\u006A'},
+ "gesl;": {'\u22DB', '\uFE00'},
+ "gvertneqq;": {'\u2269', '\uFE00'},
+ "gvnE;": {'\u2269', '\uFE00'},
+ "lates;": {'\u2AAD', '\uFE00'},
+ "lesg;": {'\u22DA', '\uFE00'},
+ "lvertneqq;": {'\u2268', '\uFE00'},
+ "lvnE;": {'\u2268', '\uFE00'},
+ "nGg;": {'\u22D9', '\u0338'},
+ "nGtv;": {'\u226B', '\u0338'},
+ "nLl;": {'\u22D8', '\u0338'},
+ "nLtv;": {'\u226A', '\u0338'},
+ "nang;": {'\u2220', '\u20D2'},
+ "napE;": {'\u2A70', '\u0338'},
+ "napid;": {'\u224B', '\u0338'},
+ "nbump;": {'\u224E', '\u0338'},
+ "nbumpe;": {'\u224F', '\u0338'},
+ "ncongdot;": {'\u2A6D', '\u0338'},
+ "nedot;": {'\u2250', '\u0338'},
+ "nesim;": {'\u2242', '\u0338'},
+ "ngE;": {'\u2267', '\u0338'},
+ "ngeqq;": {'\u2267', '\u0338'},
+ "ngeqslant;": {'\u2A7E', '\u0338'},
+ "nges;": {'\u2A7E', '\u0338'},
+ "nlE;": {'\u2266', '\u0338'},
+ "nleqq;": {'\u2266', '\u0338'},
+ "nleqslant;": {'\u2A7D', '\u0338'},
+ "nles;": {'\u2A7D', '\u0338'},
+ "notinE;": {'\u22F9', '\u0338'},
+ "notindot;": {'\u22F5', '\u0338'},
+ "nparsl;": {'\u2AFD', '\u20E5'},
+ "npart;": {'\u2202', '\u0338'},
+ "npre;": {'\u2AAF', '\u0338'},
+ "npreceq;": {'\u2AAF', '\u0338'},
+ "nrarrc;": {'\u2933', '\u0338'},
+ "nrarrw;": {'\u219D', '\u0338'},
+ "nsce;": {'\u2AB0', '\u0338'},
+ "nsubE;": {'\u2AC5', '\u0338'},
+ "nsubset;": {'\u2282', '\u20D2'},
+ "nsubseteqq;": {'\u2AC5', '\u0338'},
+ "nsucceq;": {'\u2AB0', '\u0338'},
+ "nsupE;": {'\u2AC6', '\u0338'},
+ "nsupset;": {'\u2283', '\u20D2'},
+ "nsupseteqq;": {'\u2AC6', '\u0338'},
+ "nvap;": {'\u224D', '\u20D2'},
+ "nvge;": {'\u2265', '\u20D2'},
+ "nvgt;": {'\u003E', '\u20D2'},
+ "nvle;": {'\u2264', '\u20D2'},
+ "nvlt;": {'\u003C', '\u20D2'},
+ "nvltrie;": {'\u22B4', '\u20D2'},
+ "nvrtrie;": {'\u22B5', '\u20D2'},
+ "nvsim;": {'\u223C', '\u20D2'},
+ "race;": {'\u223D', '\u0331'},
+ "smtes;": {'\u2AAC', '\uFE00'},
+ "sqcaps;": {'\u2293', '\uFE00'},
+ "sqcups;": {'\u2294', '\uFE00'},
+ "varsubsetneq;": {'\u228A', '\uFE00'},
+ "varsubsetneqq;": {'\u2ACB', '\uFE00'},
+ "varsupsetneq;": {'\u228B', '\uFE00'},
+ "varsupsetneqq;": {'\u2ACC', '\uFE00'},
+ "vnsub;": {'\u2282', '\u20D2'},
+ "vnsup;": {'\u2283', '\u20D2'},
+ "vsubnE;": {'\u2ACB', '\uFE00'},
+ "vsubne;": {'\u228A', '\uFE00'},
+ "vsupnE;": {'\u2ACC', '\uFE00'},
+ "vsupne;": {'\u228B', '\uFE00'},
+}
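+
+// Illustrative sketch, not part of the upstream file: entries in entity2
+// expand to two Unicode code points. Using the package's exported
+// UnescapeString (defined in escape.go):
+//
+//	s := html.UnescapeString("x &NotEqualTilde; y")
+//	// s contains U+2242 followed by the combining slash U+0338.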
diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go
new file mode 100644
index 0000000..04c6bec
--- /dev/null
+++ b/vendor/golang.org/x/net/html/escape.go
@@ -0,0 +1,339 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "strings"
+ "unicode/utf8"
+)
+
+// These replacements permit compatibility with old numeric entities that
+// assumed Windows-1252 encoding.
+// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
+var replacementTable = [...]rune{
+ '\u20AC', // First entry is what 0x80 should be replaced with.
+ '\u0081',
+ '\u201A',
+ '\u0192',
+ '\u201E',
+ '\u2026',
+ '\u2020',
+ '\u2021',
+ '\u02C6',
+ '\u2030',
+ '\u0160',
+ '\u2039',
+ '\u0152',
+ '\u008D',
+ '\u017D',
+ '\u008F',
+ '\u0090',
+ '\u2018',
+ '\u2019',
+ '\u201C',
+ '\u201D',
+ '\u2022',
+ '\u2013',
+ '\u2014',
+ '\u02DC',
+ '\u2122',
+ '\u0161',
+ '\u203A',
+ '\u0153',
+ '\u009D',
+ '\u017E',
+ '\u0178', // Last entry is 0x9F.
+ // 0x00->'\uFFFD' is handled programmatically.
+ // 0x0D->'\u000D' is a no-op.
+}
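+
+// Illustrative sketch, not part of the upstream file: replacementTable remaps
+// the numeric references 0x80-0x9F, so for example:
+//
+//	html.UnescapeString("&#x80;") // "\u20AC" (€), not "\u0080"
+//	html.UnescapeString("&#0;")   // "\uFFFD"; invalid code points are replaced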
+
+// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
+// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
+// Precondition: b[src] == '&' && dst <= src.
+// attribute should be true if parsing an attribute value.
+func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
+ // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
+
+ // i starts at 1 because we already know that s[0] == '&'.
+ i, s := 1, b[src:]
+
+ if len(s) <= 1 {
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+
+ if s[i] == '#' {
+ if len(s) <= 3 { // We need to have at least "&#.".
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+ i++
+ c := s[i]
+ hex := false
+ if c == 'x' || c == 'X' {
+ hex = true
+ i++
+ }
+
+ x := '\x00'
+ for i < len(s) {
+ c = s[i]
+ i++
+ if hex {
+ if '0' <= c && c <= '9' {
+ x = 16*x + rune(c) - '0'
+ continue
+ } else if 'a' <= c && c <= 'f' {
+ x = 16*x + rune(c) - 'a' + 10
+ continue
+ } else if 'A' <= c && c <= 'F' {
+ x = 16*x + rune(c) - 'A' + 10
+ continue
+ }
+ } else if '0' <= c && c <= '9' {
+ x = 10*x + rune(c) - '0'
+ continue
+ }
+ if c != ';' {
+ i--
+ }
+ break
+ }
+
+ if i <= 3 { // No characters matched.
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+
+ if 0x80 <= x && x <= 0x9F {
+ // Replace characters from Windows-1252 with UTF-8 equivalents.
+ x = replacementTable[x-0x80]
+ } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
+ // Replace invalid characters with the replacement character.
+ x = '\uFFFD'
+ }
+
+ return dst + utf8.EncodeRune(b[dst:], x), src + i
+ }
+
+ // Consume the maximum number of characters possible, with the
+ // consumed characters matching one of the named references.
+
+ for i < len(s) {
+ c := s[i]
+ i++
+ // Lower-cased characters are more common in entities, so we check for them first.
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+ continue
+ }
+ if c != ';' {
+ i--
+ }
+ break
+ }
+
+ entityName := string(s[1:i])
+ if entityName == "" {
+ // No-op.
+ } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
+ // No-op.
+ } else if x := entity[entityName]; x != 0 {
+ return dst + utf8.EncodeRune(b[dst:], x), src + i
+ } else if x := entity2[entityName]; x[0] != 0 {
+ dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
+ return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
+ } else if !attribute {
+ maxLen := len(entityName) - 1
+ if maxLen > longestEntityWithoutSemicolon {
+ maxLen = longestEntityWithoutSemicolon
+ }
+ for j := maxLen; j > 1; j-- {
+ if x := entity[entityName[:j]]; x != 0 {
+ return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
+ }
+ }
+ }
+
+ dst1, src1 = dst+i, src+i
+ copy(b[dst:dst1], b[src:src1])
+ return dst1, src1
+}
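+
+// Illustrative sketch, not part of the upstream file: outside attribute
+// values, the longest-prefix fallback above means that
+//
+//	html.UnescapeString("&notit;") // "¬it;" ("&not" matches, "it;" is literal)
+//	html.UnescapeString("&notin;") // "∉" (the full named reference matches)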
+
+// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
+// attribute should be true if parsing an attribute value.
+//
+// Studying the summary table (and T actions in its '>' column) closely, we
+// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the
+// start of the comment data. State 52 is after a '!'. The other three states
+// are after a '-'.
+//
+// Our algorithm is thus to escape every '&' and to escape '>' if and only if:
+// - The '>' is after a '!' or '-' (in the unescaped data) or
+// - The '>' is at the start of the comment data (after the opening "<!--").
+ case CommentNode:
+ if _, err := w.WriteString("<!--"); err != nil {
+ return err
+ }
+ if err := escapeComment(w, n.Data); err != nil {
+ return err
+ }
+ if _, err := w.WriteString("-->"); err != nil {
+ return err
+ }
+ return nil
+ case DoctypeNode:
+ if _, err := w.WriteString("<!DOCTYPE "); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ if n.Attr != nil {
+ var p, s string
+ for _, a := range n.Attr {
+ switch a.Key {
+ case "public":
+ p = a.Val
+ case "system":
+ s = a.Val
+ }
+ }
+ if p != "" {
+ if _, err := w.WriteString(" PUBLIC "); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, p); err != nil {
+ return err
+ }
+ if s != "" {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, s); err != nil {
+ return err
+ }
+ }
+ } else if s != "" {
+ if _, err := w.WriteString(" SYSTEM "); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, s); err != nil {
+ return err
+ }
+ }
+ }
+ return w.WriteByte('>')
+ case RawNode:
+ _, err := w.WriteString(n.Data)
+ return err
+ default:
+ return errors.New("html: unknown node type")
+ }
+
+ // Render the opening tag.
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ for _, a := range n.Attr {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ if a.Namespace != "" {
+ if _, err := w.WriteString(a.Namespace); err != nil {
+ return err
+ }
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if _, err := w.WriteString(a.Key); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(`="`); err != nil {
+ return err
+ }
+ if err := escape(w, a.Val); err != nil {
+ return err
+ }
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ }
+ if voidElements[n.Data] {
+ if n.FirstChild != nil {
+ return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
+ }
+ _, err := w.WriteString("/>")
+ return err
+ }
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+
+ // Add initial newline where there is danger of a newline being ignored.
+ if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
+ switch n.Data {
+ case "pre", "listing", "textarea":
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Render any child nodes
+ if childTextNodesAreLiteral(n) {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type == TextNode {
+ if _, err := w.WriteString(c.Data); err != nil {
+ return err
+ }
+ } else {
+ if err := render1(w, c); err != nil {
+ return err
+ }
+ }
+ }
+ if n.Data == "plaintext" {
+ // Don't render anything else. <plaintext> must be the
+ // last element in the file, with no closing tag.
+ return plaintextAbort
+ }
+ } else {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if err := render1(w, c); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Render the closing tag.
+ if _, err := w.WriteString(""); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ return w.WriteByte('>')
+}
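+
+// Illustrative sketch, not part of the upstream file: a typical round trip
+// through this renderer, assuming the package's exported Parse and Render:
+//
+//	doc, err := html.Parse(strings.NewReader("<p>Hi</p>"))
+//	if err == nil {
+//		var buf bytes.Buffer
+//		err = html.Render(&buf, doc)
+//	}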
+
+func childTextNodesAreLiteral(n *Node) bool {
+ // Per WHATWG HTML 13.3, if the parent of the current node is a style,
+ // script, xmp, iframe, noembed, noframes, or plaintext element, and the
+ // current node is a text node, append the value of the node's data
+ // literally. The specification is not explicit about it, but we only
+ // enforce this if we are in the HTML namespace (i.e. when the namespace is
+ // "").
+ // NOTE: we also always include noscript elements, although the
+ // specification states that they should only be rendered as such if
+ // scripting is enabled for the node (which is not something we track).
+ if n.Namespace != "" {
+ return false
+ }
+ switch n.Data {
+ case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
+ return true
+ default:
+ return false
+ }
+}
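+
+// Illustrative note, not part of the upstream file: this is why a tree for
+// <script>x < y</script> renders its text verbatim, while the same text under
+// a <p> element would be escaped to "x &lt; y" by render1 above.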
+
+// writeQuoted writes s to w surrounded by quotes. Normally it will use double
+// quotes, but if s contains a double quote, it will use single quotes.
+// It is used for writing the identifiers in a doctype declaration.
+// In valid HTML, they can't contain both types of quotes.
+func writeQuoted(w writer, s string) error {
+ var q byte = '"'
+ if strings.Contains(s, `"`) {
+ q = '\''
+ }
+ if err := w.WriteByte(q); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(s); err != nil {
+ return err
+ }
+ if err := w.WriteByte(q); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Section 12.1.2, "Elements", gives this list of void elements. Void elements
+// are those that can't have any contents.
+var voidElements = map[string]bool{
+ "area": true,
+ "base": true,
+ "br": true,
+ "col": true,
+ "embed": true,
+ "hr": true,
+ "img": true,
+ "input": true,
+ "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility.
+ "link": true,
+ "meta": true,
+ "param": true,
+ "source": true,
+ "track": true,
+ "wbr": true,
+}
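+
+// Illustrative note, not part of the upstream file: render1 consults this map
+// to emit "<br/>" for a childless "br" element, and returns an error for any
+// void element that does have child nodes.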
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
new file mode 100644
index 0000000..3c57880
--- /dev/null
+++ b/vendor/golang.org/x/net/html/token.go
@@ -0,0 +1,1272 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/html/atom"
+)
+
+// A TokenType is the type of a Token.
+type TokenType uint32
+
+const (
+ // ErrorToken means that an error occurred during tokenization.
+ ErrorToken TokenType = iota
+ // TextToken means a text node.
+ TextToken
+ // A StartTagToken looks like <a>.
+ StartTagToken
+ // An EndTagToken looks like </a>.
+ EndTagToken
+ // A SelfClosingTagToken tag looks like <br/>.
+ SelfClosingTagToken
+ // A CommentToken looks like <!--x-->.
+ CommentToken
+ // A DoctypeToken looks like <!DOCTYPE x>
+ DoctypeToken
+)
+
+// ErrBufferExceeded means that the buffering limit was exceeded.
+var ErrBufferExceeded = errors.New("max buffer exceeded")
+
+// String returns a string representation of the TokenType.
+func (t TokenType) String() string {
+ switch t {
+ case ErrorToken:
+ return "Error"
+ case TextToken:
+ return "Text"
+ case StartTagToken:
+ return "StartTag"
+ case EndTagToken:
+ return "EndTag"
+ case SelfClosingTagToken:
+ return "SelfClosingTag"
+ case CommentToken:
+ return "Comment"
+ case DoctypeToken:
+ return "Doctype"
+ }
+ return "Invalid(" + strconv.Itoa(int(t)) + ")"
+}
+
+// An Attribute is an attribute namespace-key-value triple. Namespace is
+// non-empty for foreign attributes like xlink, Key is alphabetic (and hence
+// does not contain escapable characters like '&', '<' or '>'), and Val is
+// unescaped (it looks like "a<b" rather than "a&lt;b").
+//
+// Namespace is only used by the parser, not the tokenizer.
+type Attribute struct {
+ Namespace, Key, Val string
+}
+
+// A Token consists of a TokenType and some Data (tag name for start and end
+// tags, content for text, comments and doctypes). A tag Token may also
+// contain a slice of Attributes. Data is unescaped for all Tokens (it looks
+// like "a<b" rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for
+// Data, or zero if Data is not a known tag name.
+type Token struct {
+ Type TokenType
+ DataAtom atom.Atom
+ Data string
+ Attr []Attribute
+}
+
+// tagString returns a string representation of a tag Token's Data and Attr.
+func (t Token) tagString() string {
+ if len(t.Attr) == 0 {
+ return t.Data
+ }
+ buf := bytes.NewBufferString(t.Data)
+ for _, a := range t.Attr {
+ buf.WriteByte(' ')
+ buf.WriteString(a.Key)
+ buf.WriteString(`="`)
+ escape(buf, a.Val)
+ buf.WriteByte('"')
+ }
+ return buf.String()
+}
+
+// String returns a string representation of the Token.
+func (t Token) String() string {
+ switch t.Type {
+ case ErrorToken:
+ return ""
+ case TextToken:
+ return EscapeString(t.Data)
+ case StartTagToken:
+ return "<" + t.tagString() + ">"
+ case EndTagToken:
+ return "</" + t.tagString() + ">"
+ case SelfClosingTagToken:
+ return "<" + t.tagString() + "/>"
+ case CommentToken:
+ return "<!--" + escapeCommentString(t.Data) + "-->"
+ case DoctypeToken:
+ return "<!DOCTYPE " + t.Data + ">"
+ }
+ return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
+}
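+
+// Illustrative note, not part of the upstream file: String re-escapes Data,
+// so a TextToken whose Data is `a<b` prints as "a&lt;b".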
+
+// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
+// the end is exclusive.
+type span struct {
+ start, end int
+}
+
+// A Tokenizer returns a stream of HTML Tokens.
+type Tokenizer struct {
+ // r is the source of the HTML text.
+ r io.Reader
+ // tt is the TokenType of the current token.
+ tt TokenType
+ // err is the first error encountered during tokenization. It is possible
+ // for tt != Error && err != nil to hold: this means that Next returned a
+ // valid token but the subsequent Next call will return an error token.
+ // For example, if the HTML text input was just "plain", then the first
+ // Next call would set z.err to io.EOF but return a TextToken, and all
+ // subsequent Next calls would return an ErrorToken.
+ // err is never reset. Once it becomes non-nil, it stays non-nil.
+ err error
+ // readErr is the error returned by the io.Reader r. It is separate from
+ // err because it is valid for an io.Reader to return (n int, err1 error)
+ // such that n > 0 && err1 != nil, and callers should always process the
+ // n > 0 bytes before considering the error err1.
+ readErr error
+ // buf[raw.start:raw.end] holds the raw bytes of the current token.
+ // buf[raw.end:] is buffered input that will yield future tokens.
+ raw span
+ buf []byte
+ // maxBuf limits the data buffered in buf. A value of 0 means unlimited.
+ maxBuf int
+ // buf[data.start:data.end] holds the raw bytes of the current token's data:
+ // a text token's text, a tag token's tag name, etc.
+ data span
+ // pendingAttr is the attribute key and value currently being tokenized.
+ // When complete, pendingAttr is pushed onto attr. nAttrReturned is
+ // incremented on each call to TagAttr.
+ pendingAttr [2]span
+ attr [][2]span
+ nAttrReturned int
+ // rawTag is the "script" in "" that closes the next token. If
+ // non-empty, the subsequent call to Next will return a raw or RCDATA text
+ // token: one that treats "
" as text instead of an element.
+ // rawTag's contents are lower-cased.
+ rawTag string
+ // textIsRaw is whether the current text token's data is not escaped.
+ textIsRaw bool
+ // convertNUL is whether NUL bytes in the current token's data should
+ // be converted into \ufffd replacement characters.
+ convertNUL bool
+ // allowCDATA is whether CDATA sections are allowed in the current context.
+ allowCDATA bool
+}
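+
+// Illustrative sketch, not part of the upstream file: driving the Tokenizer
+// with the exported NewTokenizer and Next defined later in this file:
+//
+//	z := html.NewTokenizer(strings.NewReader("<p>Hi</p>"))
+//	for z.Next() != html.ErrorToken {
+//		fmt.Println(z.Token())
+//	}
+//	// After the loop, z.Err() reports io.EOF on clean exhaustion.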
+
+// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as
+// the text "foo". The default value is false, which means to recognize it as
+// a bogus comment "<!-- [CDATA[foo]] -->" instead.
+//
+// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
+// only if tokenizing foreign content, such as MathML and SVG. However,
+// tracking foreign-contentness is difficult to do purely in the tokenizer,
+// as opposed to the parser, due to HTML integration points: an