@ -0,0 +1,21 @@ | |||
The MIT License (MIT) | |||
Copyright (c) 2013 TOML authors | |||
Permission is hereby granted, free of charge, to any person obtaining a copy | |||
of this software and associated documentation files (the "Software"), to deal | |||
in the Software without restriction, including without limitation the rights | |||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||
copies of the Software, and to permit persons to whom the Software is | |||
furnished to do so, subject to the following conditions: | |||
The above copyright notice and this permission notice shall be included in | |||
all copies or substantial portions of the Software. | |||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |||
THE SOFTWARE. |
@ -0,0 +1,21 @@ | |||
The MIT License (MIT) | |||
Copyright (c) 2013 TOML authors | |||
Permission is hereby granted, free of charge, to any person obtaining a copy | |||
of this software and associated documentation files (the "Software"), to deal | |||
in the Software without restriction, including without limitation the rights | |||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||
copies of the Software, and to permit persons to whom the Software is | |||
furnished to do so, subject to the following conditions: | |||
The above copyright notice and this permission notice shall be included in | |||
all copies or substantial portions of the Software. | |||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |||
THE SOFTWARE. |
@ -0,0 +1,21 @@ | |||
The MIT License (MIT) | |||
Copyright (c) 2013 TOML authors | |||
Permission is hereby granted, free of charge, to any person obtaining a copy | |||
of this software and associated documentation files (the "Software"), to deal | |||
in the Software without restriction, including without limitation the rights | |||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||
copies of the Software, and to permit persons to whom the Software is | |||
furnished to do so, subject to the following conditions: | |||
The above copyright notice and this permission notice shall be included in | |||
all copies or substantial portions of the Software. | |||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |||
THE SOFTWARE. |
@ -0,0 +1,21 @@ | |||
The MIT License (MIT) | |||
Copyright (c) 2013 TOML authors | |||
Permission is hereby granted, free of charge, to any person obtaining a copy | |||
of this software and associated documentation files (the "Software"), to deal | |||
in the Software without restriction, including without limitation the rights | |||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||
copies of the Software, and to permit persons to whom the Software is | |||
furnished to do so, subject to the following conditions: | |||
The above copyright notice and this permission notice shall be included in | |||
all copies or substantial portions of the Software. | |||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |||
THE SOFTWARE. |
@ -0,0 +1,509 @@ | |||
package toml | |||
import ( | |||
"fmt" | |||
"io" | |||
"io/ioutil" | |||
"math" | |||
"reflect" | |||
"strings" | |||
"time" | |||
) | |||
// e builds a package-scoped error, prefixing the formatted message with
// "toml: " so callers can recognize where it originated.
func e(format string, v ...interface{}) error {
	return fmt.Errorf("toml: "+format, v...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves. The argument is the already-parsed TOML
// value (maps, slices, and primitives); implementations take full control
// of the decoding and report any problem via the returned error.
type Unmarshaler interface {
	UnmarshalTOML(interface{}) error
}
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. | |||
func Unmarshal(p []byte, v interface{}) error { | |||
_, err := Decode(string(p), v) | |||
return err | |||
} | |||
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
	// undecoded holds the raw parsed TOML value (maps/slices/primitives)
	// exactly as the parser produced it.
	undecoded interface{}
	// context is the key path at which this value was found, used to
	// restore MetaData bookkeeping when the value is finally decoded.
	context Key
}
// Deprecated: Use MetaData.PrimitiveDecode instead.
//
// This package-level variant cannot update the caller's MetaData, so keys
// decoded through it are tracked in a throwaway map.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
	md := MetaData{decoded: make(map[string]bool)}
	return md.unify(primValue.undecoded, rvalue(v))
}
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
	// Restore the key context saved when the Primitive was captured so that
	// decoded-key bookkeeping lands under the right prefix, and clear it
	// again when done.
	md.context = primValue.context
	defer func() { md.context = nil }()
	return md.unify(primValue.undecoded, rvalue(v))
}
// Decode will decode the contents of `data` in TOML format into a pointer | |||
// `v`. | |||
// | |||
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be | |||
// used interchangeably.) | |||
// | |||
// TOML arrays of tables correspond to either a slice of structs or a slice | |||
// of maps. | |||
// | |||
// TOML datetimes correspond to Go `time.Time` values. | |||
// | |||
// All other TOML types (float, string, int, bool and array) correspond | |||
// to the obvious Go types. | |||
// | |||
// An exception to the above rules is if a type implements the | |||
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value | |||
// (floats, strings, integers, booleans and datetimes) will be converted to | |||
// a byte string and given to the value's UnmarshalText method. See the | |||
// Unmarshaler example for a demonstration with time duration strings. | |||
// | |||
// Key mapping | |||
// | |||
// TOML keys can map to either keys in a Go map or field names in a Go | |||
// struct. The special `toml` struct tag may be used to map TOML keys to | |||
// struct fields that don't match the key name exactly. (See the example.) | |||
// A case insensitive match to struct names will be tried if an exact match | |||
// can't be found. | |||
// | |||
// The mapping between TOML values and Go values is loose. That is, there | |||
// may exist TOML values that cannot be placed into your representation, and | |||
// there may be parts of your representation that do not correspond to | |||
// TOML values. This loose mapping can be made stricter by using the IsDefined | |||
// and/or Undecoded methods on the MetaData returned. | |||
// | |||
// This decoder will not handle cyclic types. If a cyclic type is passed, | |||
// `Decode` will not terminate. | |||
func Decode(data string, v interface{}) (MetaData, error) { | |||
rv := reflect.ValueOf(v) | |||
if rv.Kind() != reflect.Ptr { | |||
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) | |||
} | |||
if rv.IsNil() { | |||
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) | |||
} | |||
p, err := parse(data) | |||
if err != nil { | |||
return MetaData{}, err | |||
} | |||
md := MetaData{ | |||
p.mapping, p.types, p.ordered, | |||
make(map[string]bool, len(p.ordered)), nil, | |||
} | |||
return md, md.unify(p.mapping, indirect(rv)) | |||
} | |||
// DecodeFile is just like Decode, except it will automatically read the | |||
// contents of the file at `fpath` and decode it for you. | |||
func DecodeFile(fpath string, v interface{}) (MetaData, error) { | |||
bs, err := ioutil.ReadFile(fpath) | |||
if err != nil { | |||
return MetaData{}, err | |||
} | |||
return Decode(string(bs), v) | |||
} | |||
// DecodeReader is just like Decode, except it will consume all bytes | |||
// from the reader and decode it for you. | |||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { | |||
bs, err := ioutil.ReadAll(r) | |||
if err != nil { | |||
return MetaData{}, err | |||
} | |||
return Decode(string(bs), v) | |||
} | |||
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
//
// The order of the special cases below is significant: Primitive capture
// must precede everything else, and the Unmarshaler/time.Time/TextUnmarshaler
// checks must run before the kind-based dispatch.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
	// Special case. Look for a `Primitive` value.
	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
		// Save the undecoded data and the key context into the primitive
		// value. The context is copied because md.context is mutated as
		// decoding proceeds.
		context := make(Key, len(md.context))
		copy(context, md.context)
		rv.Set(reflect.ValueOf(Primitive{
			undecoded: data,
			context:   context,
		}))
		return nil
	}
	// Special case. Unmarshaler Interface support.
	if rv.CanAddr() {
		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
			return v.UnmarshalTOML(data)
		}
	}
	// Special case. Handle time.Time values specifically.
	// TODO: Remove this code when we decide to drop support for Go 1.1.
	// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
	// interfaces.
	if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
		return md.unifyDatetime(data, rv)
	}
	// Special case. Look for a value satisfying the TextUnmarshaler interface.
	if v, ok := rv.Interface().(TextUnmarshaler); ok {
		return md.unifyText(data, v)
	}
	// BUG(burntsushi)
	// The behavior here is incorrect whenever a Go type satisfies the
	// encoding.TextUnmarshaler interface but also corresponds to a TOML
	// hash or array. In particular, the unmarshaler should only be applied
	// to primitive TOML values. But at this point, it will be applied to
	// all kinds of values and produce an incorrect error whenever those values
	// are hashes or arrays (including arrays of tables).

	k := rv.Kind()

	// laziness: all integer kinds (signed and unsigned) are contiguous in
	// reflect's Kind enumeration, so a single range check covers them.
	if k >= reflect.Int && k <= reflect.Uint64 {
		return md.unifyInt(data, rv)
	}
	switch k {
	case reflect.Ptr:
		// Allocate a fresh element, unify into it, then point rv at it.
		elem := reflect.New(rv.Type().Elem())
		err := md.unify(data, reflect.Indirect(elem))
		if err != nil {
			return err
		}
		rv.Set(elem)
		return nil
	case reflect.Struct:
		return md.unifyStruct(data, rv)
	case reflect.Map:
		return md.unifyMap(data, rv)
	case reflect.Array:
		return md.unifyArray(data, rv)
	case reflect.Slice:
		return md.unifySlice(data, rv)
	case reflect.String:
		return md.unifyString(data, rv)
	case reflect.Bool:
		return md.unifyBool(data, rv)
	case reflect.Interface:
		// we only support empty interfaces.
		if rv.NumMethod() > 0 {
			return e("unsupported type %s", rv.Type())
		}
		return md.unifyAnything(data, rv)
	case reflect.Float32:
		fallthrough
	case reflect.Float64:
		return md.unifyFloat64(data, rv)
	}
	return e("unsupported type %s", rv.Kind())
}
// unifyStruct unifies a TOML table with a Go struct. Fields are matched by
// the name recorded in the cached field list (which honors `toml` tags);
// an exact match wins over a case-insensitive one.
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
	tmap, ok := mapping.(map[string]interface{})
	if !ok {
		// A nil TOML value leaves the struct untouched.
		if mapping == nil {
			return nil
		}
		return e("type mismatch for %s: expected table but found %T",
			rv.Type().String(), mapping)
	}
	for key, datum := range tmap {
		var f *field
		fields := cachedTypeFields(rv.Type())
		for i := range fields {
			ff := &fields[i]
			// An exact name match terminates the search immediately; a
			// case-insensitive match is remembered but can still be
			// overridden by a later exact match.
			if ff.name == key {
				f = ff
				break
			}
			if f == nil && strings.EqualFold(ff.name, key) {
				f = ff
			}
		}
		if f != nil {
			subv := rv
			// Walk the field index path (handles embedded structs).
			for _, i := range f.index {
				subv = indirect(subv.Field(i))
			}
			if isUnifiable(subv) {
				// Record the key as decoded before descending, and pop the
				// context afterwards.
				md.decoded[md.context.add(key).String()] = true
				md.context = append(md.context, key)
				if err := md.unify(datum, subv); err != nil {
					return err
				}
				md.context = md.context[0 : len(md.context)-1]
			} else if f.name != "" {
				// Bad user! No soup for you!
				return e("cannot write unexported field %s.%s",
					rv.Type().String(), f.name)
			}
		}
	}
	return nil
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { | |||
tmap, ok := mapping.(map[string]interface{}) | |||
if !ok { | |||
if tmap == nil { | |||
return nil | |||
} | |||
return badtype("map", mapping) | |||
} | |||
if rv.IsNil() { | |||
rv.Set(reflect.MakeMap(rv.Type())) | |||
} | |||
for k, v := range tmap { | |||
md.decoded[md.context.add(k).String()] = true | |||
md.context = append(md.context, k) | |||
rvkey := indirect(reflect.New(rv.Type().Key())) | |||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) | |||
if err := md.unify(v, rvval); err != nil { | |||
return err | |||
} | |||
md.context = md.context[0 : len(md.context)-1] | |||
rvkey.SetString(k) | |||
rv.SetMapIndex(rvkey, rvval) | |||
} | |||
return nil | |||
} | |||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { | |||
datav := reflect.ValueOf(data) | |||
if datav.Kind() != reflect.Slice { | |||
if !datav.IsValid() { | |||
return nil | |||
} | |||
return badtype("slice", data) | |||
} | |||
sliceLen := datav.Len() | |||
if sliceLen != rv.Len() { | |||
return e("expected array length %d; got TOML array of length %d", | |||
rv.Len(), sliceLen) | |||
} | |||
return md.unifySliceArray(datav, rv) | |||
} | |||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { | |||
datav := reflect.ValueOf(data) | |||
if datav.Kind() != reflect.Slice { | |||
if !datav.IsValid() { | |||
return nil | |||
} | |||
return badtype("slice", data) | |||
} | |||
n := datav.Len() | |||
if rv.IsNil() || rv.Cap() < n { | |||
rv.Set(reflect.MakeSlice(rv.Type(), n, n)) | |||
} | |||
rv.SetLen(n) | |||
return md.unifySliceArray(datav, rv) | |||
} | |||
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { | |||
sliceLen := data.Len() | |||
for i := 0; i < sliceLen; i++ { | |||
v := data.Index(i).Interface() | |||
sliceval := indirect(rv.Index(i)) | |||
if err := md.unify(v, sliceval); err != nil { | |||
return err | |||
} | |||
} | |||
return nil | |||
} | |||
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { | |||
if _, ok := data.(time.Time); ok { | |||
rv.Set(reflect.ValueOf(data)) | |||
return nil | |||
} | |||
return badtype("time.Time", data) | |||
} | |||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { | |||
if s, ok := data.(string); ok { | |||
rv.SetString(s) | |||
return nil | |||
} | |||
return badtype("string", data) | |||
} | |||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { | |||
if num, ok := data.(float64); ok { | |||
switch rv.Kind() { | |||
case reflect.Float32: | |||
fallthrough | |||
case reflect.Float64: | |||
rv.SetFloat(num) | |||
default: | |||
panic("bug") | |||
} | |||
return nil | |||
} | |||
return badtype("float", data) | |||
} | |||
// unifyInt unifies a TOML integer (always parsed as int64) with any Go
// integer kind, performing bounds checks for the sized kinds. The caller
// (unify) guarantees rv's kind is in [Int, Uint64], so the trailing branch
// is unreachable by construction.
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
	if num, ok := data.(int64); ok {
		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
			switch rv.Kind() {
			case reflect.Int, reflect.Int64:
				// No bounds checking necessary.
			case reflect.Int8:
				if num < math.MinInt8 || num > math.MaxInt8 {
					return e("value %d is out of range for int8", num)
				}
			case reflect.Int16:
				if num < math.MinInt16 || num > math.MaxInt16 {
					return e("value %d is out of range for int16", num)
				}
			case reflect.Int32:
				if num < math.MinInt32 || num > math.MaxInt32 {
					return e("value %d is out of range for int32", num)
				}
			}
			rv.SetInt(num)
		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
			// The unsigned checks also reject negative inputs, which would
			// otherwise wrap around in the uint64 conversion.
			unum := uint64(num)
			switch rv.Kind() {
			case reflect.Uint, reflect.Uint64:
				// No bounds checking necessary.
			case reflect.Uint8:
				if num < 0 || unum > math.MaxUint8 {
					return e("value %d is out of range for uint8", num)
				}
			case reflect.Uint16:
				if num < 0 || unum > math.MaxUint16 {
					return e("value %d is out of range for uint16", num)
				}
			case reflect.Uint32:
				if num < 0 || unum > math.MaxUint32 {
					return e("value %d is out of range for uint32", num)
				}
			}
			rv.SetUint(unum)
		} else {
			panic("unreachable")
		}
		return nil
	}
	return badtype("integer", data)
}
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { | |||
if b, ok := data.(bool); ok { | |||
rv.SetBool(b) | |||
return nil | |||
} | |||
return badtype("boolean", data) | |||
} | |||
// unifyAnything stores the parsed TOML value as-is into an empty-interface
// destination. Note that keys inside this value are NOT marked as decoded.
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
	rv.Set(reflect.ValueOf(data))
	return nil
}
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { | |||
var s string | |||
switch sdata := data.(type) { | |||
case TextMarshaler: | |||
text, err := sdata.MarshalText() | |||
if err != nil { | |||
return err | |||
} | |||
s = string(text) | |||
case fmt.Stringer: | |||
s = sdata.String() | |||
case string: | |||
s = sdata | |||
case bool: | |||
s = fmt.Sprintf("%v", sdata) | |||
case int64: | |||
s = fmt.Sprintf("%d", sdata) | |||
case float64: | |||
s = fmt.Sprintf("%f", sdata) | |||
default: | |||
return badtype("primitive (string-like)", data) | |||
} | |||
if err := v.UnmarshalText([]byte(s)); err != nil { | |||
return err | |||
} | |||
return nil | |||
} | |||
// rvalue returns a reflect.Value of `v`. All pointers are resolved,
// allocating fresh values for any nil pointers along the way (see indirect).
func rvalue(v interface{}) reflect.Value {
	return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer. | |||
// Pointers are followed until the value is not a pointer. | |||
// New values are allocated for each nil pointer. | |||
// | |||
// An exception to this rule is if the value satisfies an interface of | |||
// interest to us (like encoding.TextUnmarshaler). | |||
func indirect(v reflect.Value) reflect.Value { | |||
if v.Kind() != reflect.Ptr { | |||
if v.CanSet() { | |||
pv := v.Addr() | |||
if _, ok := pv.Interface().(TextUnmarshaler); ok { | |||
return pv | |||
} | |||
} | |||
return v | |||
} | |||
if v.IsNil() { | |||
v.Set(reflect.New(v.Type().Elem())) | |||
} | |||
return indirect(reflect.Indirect(v)) | |||
} | |||
func isUnifiable(rv reflect.Value) bool { | |||
if rv.CanSet() { | |||
return true | |||
} | |||
if _, ok := rv.Interface().(TextUnmarshaler); ok { | |||
return true | |||
} | |||
return false | |||
} | |||
func badtype(expected string, data interface{}) error { | |||
return e("cannot load TOML value of type %T into a Go %s", data, expected) | |||
} |
@ -0,0 +1,121 @@ | |||
package toml | |||
import "strings" | |||
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
	// mapping is the raw parsed document: nested map[string]interface{}.
	mapping map[string]interface{}
	// types records the TOML type of each dotted key.
	types map[string]tomlType
	// keys lists every key in document order.
	keys []Key
	// decoded tracks which dotted keys have been unified into Go values.
	decoded map[string]bool
	context Key // Used only during decoding.
}
// IsDefined returns true if the key given exists in the TOML data. The key | |||
// should be specified hierarchially. e.g., | |||
// | |||
// // access the TOML key 'a.b.c' | |||
// IsDefined("a", "b", "c") | |||
// | |||
// IsDefined will return false if an empty key given. Keys are case sensitive. | |||
func (md *MetaData) IsDefined(key ...string) bool { | |||
if len(key) == 0 { | |||
return false | |||
} | |||
var hash map[string]interface{} | |||
var ok bool | |||
var hashOrVal interface{} = md.mapping | |||
for _, k := range key { | |||
if hash, ok = hashOrVal.(map[string]interface{}); !ok { | |||
return false | |||
} | |||
if hashOrVal, ok = hash[k]; !ok { | |||
return false | |||
} | |||
} | |||
return true | |||
} | |||
// Type returns a string representation of the type of the key specified. | |||
// | |||
// Type will return the empty string if given an empty key or a key that | |||
// does not exist. Keys are case sensitive. | |||
func (md *MetaData) Type(key ...string) string { | |||
fullkey := strings.Join(key, ".") | |||
if typ, ok := md.types[fullkey]; ok { | |||
return typ.typeString() | |||
} | |||
return "" | |||
} | |||
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string

// String renders the key as its dotted form, e.g. "a.b.c". Segments are
// not quoted; see maybeQuotedAll for a TOML-safe rendering.
func (k Key) String() string {
	return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string { | |||
var ss []string | |||
for i := range k { | |||
ss = append(ss, k.maybeQuoted(i)) | |||
} | |||
return strings.Join(ss, ".") | |||
} | |||
func (k Key) maybeQuoted(i int) string { | |||
quote := false | |||
for _, c := range k[i] { | |||
if !isBareKeyChar(c) { | |||
quote = true | |||
break | |||
} | |||
} | |||
if quote { | |||
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" | |||
} | |||
return k[i] | |||
} | |||
func (k Key) add(piece string) Key { | |||
newKey := make(Key, len(k)+1) | |||
copy(newKey, k) | |||
newKey[len(k)] = piece | |||
return newKey | |||
} | |||
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
//
// NOTE(review): the internal slice is returned directly, so callers must
// not mutate it.
func (md *MetaData) Keys() []Key {
	return md.keys
}
// Undecoded returns all keys that have not been decoded in the order in which | |||
// they appear in the original TOML document. | |||
// | |||
// This includes keys that haven't been decoded because of a Primitive value. | |||
// Once the Primitive value is decoded, the keys will be considered decoded. | |||
// | |||
// Also note that decoding into an empty interface will result in no decoding, | |||
// and so no keys will be considered decoded. | |||
// | |||
// In this sense, the Undecoded keys correspond to keys in the TOML document | |||
// that do not have a concrete type in your representation. | |||
func (md *MetaData) Undecoded() []Key { | |||
undecoded := make([]Key, 0, len(md.keys)) | |||
for _, key := range md.keys { | |||
if !md.decoded[key.String()] { | |||
undecoded = append(undecoded, key) | |||
} | |||
} | |||
return undecoded | |||
} |
@ -0,0 +1,27 @@ | |||
/* | |||
Package toml provides facilities for decoding and encoding TOML configuration | |||
files via reflection. There is also support for delaying decoding with | |||
the Primitive type, and querying the set of keys in a TOML document with the | |||
MetaData type. | |||
The specification implemented: https://github.com/toml-lang/toml | |||
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify | |||
whether a file is a valid TOML document. It can also be used to print the | |||
type of each key in a TOML document. | |||
Testing | |||
There are two important types of tests used for this package. The first is | |||
contained inside '*_test.go' files and uses the standard Go unit testing | |||
framework. These tests are primarily devoted to holistically testing the | |||
decoder and encoder. | |||
The second type of testing is used to verify the implementation's adherence | |||
to the TOML specification. These tests have been factored into their own | |||
project: https://github.com/BurntSushi/toml-test | |||
The reason the tests are in a separate project is so that they can be used by | |||
any implementation of TOML. Namely, it is language agnostic. | |||
*/ | |||
package toml |
@ -0,0 +1,568 @@ | |||
package toml | |||
import ( | |||
"bufio" | |||
"errors" | |||
"fmt" | |||
"io" | |||
"reflect" | |||
"sort" | |||
"strconv" | |||
"strings" | |||
"time" | |||
) | |||
// tomlEncodeError wraps an error raised via panic inside the encoder so
// that safeEncode's recover handler can distinguish encoder errors (which
// are returned to the caller) from unrelated panics (which are re-raised).
type tomlEncodeError struct{ error }
// Sentinel errors the encoder can produce. They surface to callers of
// Encode via the panic/recover protocol in safeEncode.
var (
	errArrayMixedElementTypes = errors.New(
		"toml: cannot encode array with mixed element types")
	errArrayNilElement = errors.New(
		"toml: cannot encode array with nil element")
	errNonString = errors.New(
		"toml: cannot encode a map with non-string key type")
	errAnonNonStruct = errors.New(
		"toml: cannot encode an anonymous field that is not a struct")
	errArrayNoTable = errors.New(
		"toml: TOML array element cannot contain a table")
	errNoKey = errors.New(
		"toml: top-level values must be Go maps or structs")
	errAnything = errors.New("") // used in testing
)
// quotedReplacer escapes the characters that must be backslash-escaped
// inside a basic (double-quoted) TOML string.
var quotedReplacer = strings.NewReplacer(
	"\t", "\\t",
	"\n", "\\n",
	"\r", "\\r",
	"\"", "\\\"",
	"\\", "\\\\",
)
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
	// A single indentation level. By default it is two spaces.
	Indent string

	// hasWritten is whether we have written any output to w yet.
	// Used to decide when a blank line must separate adjacent elements.
	hasWritten bool
	// w buffers all output; it is flushed once at the end of Encode.
	w *bufio.Writer
}
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer | |||
// given. By default, a single indentation level is 2 spaces. | |||
func NewEncoder(w io.Writer) *Encoder { | |||
return &Encoder{ | |||
w: bufio.NewWriter(w), | |||
Indent: " ", | |||
} | |||
} | |||
// Encode writes a TOML representation of the Go value to the underlying | |||
// io.Writer. If the value given cannot be encoded to a valid TOML document, | |||
// then an error is returned. | |||
// | |||
// The mapping between Go values and TOML values should be precisely the same | |||
// as for the Decode* functions. Similarly, the TextMarshaler interface is | |||
// supported by encoding the resulting bytes as strings. (If you want to write | |||
// arbitrary binary data then you will need to use something like base64 since | |||
// TOML does not have any binary types.) | |||
// | |||
// When encoding TOML hashes (i.e., Go maps or structs), keys without any | |||
// sub-hashes are encoded first. | |||
// | |||
// If a Go map is encoded, then its keys are sorted alphabetically for | |||
// deterministic output. More control over this behavior may be provided if | |||
// there is demand for it. | |||
// | |||
// Encoding Go values without a corresponding TOML representation---like map | |||
// types with non-string keys---will cause an error to be returned. Similarly | |||
// for mixed arrays/slices, arrays/slices with nil elements, embedded | |||
// non-struct types and nested slices containing maps or structs. | |||
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK | |||
// and so is []map[string][]string.) | |||
func (enc *Encoder) Encode(v interface{}) error { | |||
rv := eindirect(reflect.ValueOf(v)) | |||
if err := enc.safeEncode(Key([]string{}), rv); err != nil { | |||
return err | |||
} | |||
return enc.w.Flush() | |||
} | |||
// safeEncode invokes the panic-based encoder and converts any
// tomlEncodeError panic back into an ordinary returned error. Panics of
// any other type are re-raised untouched.
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if terr, ok := r.(tomlEncodeError); ok {
				// Unwrap the encoder error and assign it to the named
				// return value.
				err = terr.error
				return
			}
			panic(r)
		}
	}()
	enc.encode(key, rv)
	return nil
}
// encode dispatches on the value's dynamic type and kind, choosing between
// "key = value" form, a [table], or an [[array-of-tables]]. Errors are
// raised via panic (see safeEncode).
func (enc *Encoder) encode(key Key, rv reflect.Value) {
	// Special case. Time needs to be in ISO8601 format.
	// Special case. If we can marshal the type to text, then we used that.
	// Basically, this prevents the encoder for handling these types as
	// generic structs (or whatever the underlying type of a TextMarshaler is).
	switch rv.Interface().(type) {
	case time.Time, TextMarshaler:
		enc.keyEqElement(key, rv)
		return
	}

	k := rv.Kind()
	switch k {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64,
		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
		enc.keyEqElement(key, rv)
	case reflect.Array, reflect.Slice:
		// An array whose elements are tables is rendered as [[key]]
		// blocks; anything else is an inline array value.
		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
			enc.eArrayOfTables(key, rv)
		} else {
			enc.keyEqElement(key, rv)
		}
	case reflect.Interface:
		// nil interfaces/maps/pointers are silently skipped: TOML has no
		// null, so there is nothing to emit.
		if rv.IsNil() {
			return
		}
		enc.encode(key, rv.Elem())
	case reflect.Map:
		if rv.IsNil() {
			return
		}
		enc.eTable(key, rv)
	case reflect.Ptr:
		if rv.IsNil() {
			return
		}
		enc.encode(key, rv.Elem())
	case reflect.Struct:
		enc.eTable(key, rv)
	default:
		panic(e("unsupported type for key '%s': %s", key, k))
	}
}
// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
	switch v := rv.Interface().(type) {
	case time.Time:
		// Special case time.Time as a primitive. Has to come before
		// TextMarshaler below because time.Time implements
		// encoding.TextMarshaler, but we need to always use UTC.
		enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
		return
	case TextMarshaler:
		// Special case. Use text marshaler if it's available for this value.
		if s, err := v.MarshalText(); err != nil {
			encPanic(err)
		} else {
			enc.writeQuoted(string(s))
		}
		return
	}
	switch rv.Kind() {
	case reflect.Bool:
		enc.wf(strconv.FormatBool(rv.Bool()))
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64:
		enc.wf(strconv.FormatInt(rv.Int(), 10))
	case reflect.Uint, reflect.Uint8, reflect.Uint16,
		reflect.Uint32, reflect.Uint64:
		enc.wf(strconv.FormatUint(rv.Uint(), 10))
	case reflect.Float32:
		// floatAddDecimal guarantees the TOML requirement that floats
		// carry a decimal point.
		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
	case reflect.Float64:
		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
	case reflect.Array, reflect.Slice:
		enc.eArrayOrSliceElement(rv)
	case reflect.Interface:
		// Unwrap the interface and encode its dynamic value.
		enc.eElement(rv.Elem())
	case reflect.String:
		enc.writeQuoted(rv.String())
	default:
		panic(e("unexpected primitive type: %s", rv.Kind()))
	}
}
// floatAddDecimal makes a formatted float conform to the TOML spec,
// which requires a decimal point with at least one digit on either
// side: "3" becomes "3.0", while "3.14" is returned unchanged.
func floatAddDecimal(fstr string) string {
	if strings.Contains(fstr, ".") {
		return fstr
	}
	return fstr + ".0"
}
// writeQuoted writes s as a basic TOML string, escaping special
// characters via quotedReplacer.
func (enc *Encoder) writeQuoted(s string) {
	enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
// eArrayOrSliceElement writes rv as an inline TOML array, e.g. [1, 2, 3].
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
	length := rv.Len()
	enc.wf("[")
	for i := 0; i < length; i++ {
		elem := rv.Index(i)
		enc.eElement(elem)
		if i != length-1 {
			enc.wf(", ")
		}
	}
	enc.wf("]")
}
// eArrayOfTables writes rv as an array of tables, emitting a [[key]]
// header followed by the table body for each non-nil element.
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	for i := 0; i < rv.Len(); i++ {
		trv := rv.Index(i)
		if isNil(trv) {
			// Nil elements produce no output.
			continue
		}
		panicIfInvalidKey(key)
		enc.newline()
		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
		enc.newline()
		enc.eMapOrStruct(key, trv)
	}
}
// eTable writes rv (a map or struct) as a TOML table with a [key]
// header. An empty key means the top-level table, which has no header.
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
	panicIfInvalidKey(key)
	if len(key) == 1 {
		// Output an extra newline between top-level tables.
		// (The newline isn't written if nothing else has been written though.)
		enc.newline()
	}
	if len(key) > 0 {
		enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
		enc.newline()
	}
	enc.eMapOrStruct(key, rv)
}
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { | |||
switch rv := eindirect(rv); rv.Kind() { | |||
case reflect.Map: | |||
enc.eMap(key, rv) | |||
case reflect.Struct: | |||
enc.eStruct(key, rv) | |||
default: | |||
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) | |||
} | |||
} | |||
// eMap writes a map as a TOML table body. Only string-keyed maps are
// representable; anything else panics with errNonString.
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
	rt := rv.Type()
	if rt.Key().Kind() != reflect.String {
		encPanic(errNonString)
	}
	// Sort keys so that we have deterministic output. And write keys directly
	// underneath this key first, before writing sub-structs or sub-maps.
	var mapKeysDirect, mapKeysSub []string
	for _, mapKey := range rv.MapKeys() {
		k := mapKey.String()
		// Values that are themselves tables go in the second pass so
		// they don't swallow the plain key/value lines of this table.
		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
			mapKeysSub = append(mapKeysSub, k)
		} else {
			mapKeysDirect = append(mapKeysDirect, k)
		}
	}
	var writeMapKeys = func(mapKeys []string) {
		sort.Strings(mapKeys)
		for _, mapKey := range mapKeys {
			mrv := rv.MapIndex(reflect.ValueOf(mapKey))
			if isNil(mrv) {
				// Don't write anything for nil fields.
				continue
			}
			enc.encode(key.add(mapKey), mrv)
		}
	}
	writeMapKeys(mapKeysDirect)
	writeMapKeys(mapKeysSub)
}
// eStruct writes a struct as a TOML table body, honoring `toml` tags
// (name override, "-", omitempty, omitzero) and flattening untagged
// anonymous struct fields the way encoding/json does.
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
	// Write keys for fields directly under this key first, because if we write
	// a field that creates a new table, then all keys under it will be in that
	// table (not the one we're writing here).
	rt := rv.Type()
	var fieldsDirect, fieldsSub [][]int
	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
		for i := 0; i < rt.NumField(); i++ {
			f := rt.Field(i)
			// skip unexported fields
			if f.PkgPath != "" && !f.Anonymous {
				continue
			}
			frv := rv.Field(i)
			if f.Anonymous {
				t := f.Type
				switch t.Kind() {
				case reflect.Struct:
					// Treat anonymous struct fields with
					// tag names as though they are not
					// anonymous, like encoding/json does.
					if getOptions(f.Tag).name == "" {
						addFields(t, frv, f.Index)
						continue
					}
				case reflect.Ptr:
					// Embedded struct pointers are flattened too,
					// but nil ones are skipped entirely.
					if t.Elem().Kind() == reflect.Struct &&
						getOptions(f.Tag).name == "" {
						if !frv.IsNil() {
							addFields(t.Elem(), frv.Elem(), f.Index)
						}
						continue
					}
					// Fall through to the normal field encoding logic below
					// for non-struct anonymous fields.
				}
			}
			// Table-typed fields are deferred to the second pass (see
			// the comment at the top of this function).
			if typeIsHash(tomlTypeOfGo(frv)) {
				fieldsSub = append(fieldsSub, append(start, f.Index...))
			} else {
				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
			}
		}
	}
	addFields(rt, rv, nil)
	var writeFields = func(fields [][]int) {
		for _, fieldIndex := range fields {
			sft := rt.FieldByIndex(fieldIndex)
			sf := rv.FieldByIndex(fieldIndex)
			if isNil(sf) {
				// Don't write anything for nil fields.
				continue
			}
			opts := getOptions(sft.Tag)
			if opts.skip {
				continue
			}
			keyName := sft.Name
			if opts.name != "" {
				keyName = opts.name
			}
			if opts.omitempty && isEmpty(sf) {
				continue
			}
			if opts.omitzero && isZero(sf) {
				continue
			}
			enc.encode(key.add(keyName), sf)
		}
	}
	writeFields(fieldsDirect)
	writeFields(fieldsSub)
}
// tomlTypeOfGo returns the TOML type of a Go value. The type may be
// `nil`, which means no concrete TOML type could be found (e.g. the
// value itself is nil). Among other things, it is used to determine
// whether the types of array elements are mixed (which is forbidden).
func tomlTypeOfGo(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() {
		return nil
	}
	switch rv.Kind() {
	case reflect.Bool:
		return tomlBool
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64:
		return tomlInteger
	case reflect.Float32, reflect.Float64:
		return tomlFloat
	case reflect.Array, reflect.Slice:
		// A slice whose elements are tables is an array of tables.
		if typeEqual(tomlHash, tomlArrayType(rv)) {
			return tomlArrayHash
		}
		return tomlArray
	case reflect.Ptr, reflect.Interface:
		return tomlTypeOfGo(rv.Elem())
	case reflect.String:
		return tomlString
	case reflect.Map:
		return tomlHash
	case reflect.Struct:
		switch rv.Interface().(type) {
		case time.Time:
			return tomlDatetime
		case TextMarshaler:
			// TextMarshaler values are encoded as quoted strings.
			return tomlString
		default:
			return tomlHash
		}
	default:
		panic("unexpected reflect.Kind: " + rv.Kind().String())
	}
}
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
		return nil
	}
	firstType := tomlTypeOfGo(rv.Index(0))
	if firstType == nil {
		encPanic(errArrayNilElement)
	}
	// Every remaining element must have the same TOML type as the first.
	rvlen := rv.Len()
	for i := 1; i < rvlen; i++ {
		elem := rv.Index(i)
		switch elemType := tomlTypeOfGo(elem); {
		case elemType == nil:
			encPanic(errArrayNilElement)
		case !typeEqual(firstType, elemType):
			encPanic(errArrayMixedElementTypes)
		}
	}
	// If we have a nested array, then we must make sure that the nested
	// array contains ONLY primitives.
	// This checks arbitrarily nested arrays.
	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
		nest := tomlArrayType(eindirect(rv.Index(0)))
		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
			encPanic(errArrayNoTable)
		}
	}
	return firstType
}
// tagOptions holds the parsed contents of a struct field's `toml` tag.
type tagOptions struct {
	skip      bool // "-"
	name      string
	omitempty bool
	omitzero  bool
}
func getOptions(tag reflect.StructTag) tagOptions { | |||
t := tag.Get("toml") | |||
if t == "-" { | |||
return tagOptions{skip: true} | |||
} | |||
var opts tagOptions | |||
parts := strings.Split(t, ",") | |||
opts.name = parts[0] | |||
for _, s := range parts[1:] { | |||
switch s { | |||
case "omitempty": | |||
opts.omitempty = true | |||
case "omitzero": | |||
opts.omitzero = true | |||
} | |||
} | |||
return opts | |||
} | |||
func isZero(rv reflect.Value) bool { | |||
switch rv.Kind() { | |||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: | |||
return rv.Int() == 0 | |||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: | |||
return rv.Uint() == 0 | |||
case reflect.Float32, reflect.Float64: | |||
return rv.Float() == 0.0 | |||
} | |||
return false | |||
} | |||
func isEmpty(rv reflect.Value) bool { | |||
switch rv.Kind() { | |||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String: | |||
return rv.Len() == 0 | |||
case reflect.Bool: | |||
return !rv.Bool() | |||
} | |||
return false | |||
} | |||
// newline writes a newline, but only once something else has been
// written (so output never begins with a blank line).
func (enc *Encoder) newline() {
	if enc.hasWritten {
		enc.wf("\n")
	}
}
// keyEqElement writes a single "key = value" line for a primitive or
// inline-array value.
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	panicIfInvalidKey(key)
	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
	enc.eElement(val)
	enc.newline()
}
// wf writes a formatted string to the underlying writer, converting any
// write error into an encoder panic (recovered by Encode).
func (enc *Encoder) wf(format string, v ...interface{}) {
	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
		encPanic(err)
	}
	enc.hasWritten = true
}
// indentStr returns the indentation prefix for a key of the given depth.
func (enc *Encoder) indentStr(key Key) string {
	return strings.Repeat(enc.Indent, len(key)-1)
}
// encPanic aborts encoding with err wrapped so that Encode's recover can
// distinguish encoder errors from genuine bugs.
func encPanic(err error) {
	panic(tomlEncodeError{err})
}
func eindirect(v reflect.Value) reflect.Value { | |||
switch v.Kind() { | |||
case reflect.Ptr, reflect.Interface: | |||
return eindirect(v.Elem()) | |||
default: | |||
return v | |||
} | |||
} | |||
func isNil(rv reflect.Value) bool { | |||
switch rv.Kind() { | |||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: | |||
return rv.IsNil() | |||
default: | |||
return false | |||
} | |||
} | |||
// panicIfInvalidKey aborts encoding if any component of key is empty
// (TOML forbids empty table/key names).
func panicIfInvalidKey(key Key) {
	for _, k := range key {
		if len(k) == 0 {
			encPanic(e("Key '%s' is not a valid table name. Key names "+
				"cannot be empty.", key.maybeQuotedAll()))
		}
	}
}
// isValidKeyName reports whether s may be used as a key name; TOML only
// forbids empty names.
func isValidKeyName(s string) bool {
	return s != ""
}
@ -0,0 +1,19 @@ | |||
// +build go1.2 | |||
package toml | |||
// In order to support Go 1.1, we define our own TextMarshaler and | |||
// TextUnmarshaler types. For Go 1.2+, we just alias them with the | |||
// standard library interfaces. | |||
import ( | |||
"encoding" | |||
) | |||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported. (This file is compiled for go1.2+ only;
// see the build tag above.)
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler
@ -0,0 +1,18 @@ | |||
// +build !go1.2 | |||
package toml | |||
// These interfaces were introduced in Go 1.2, so we add them manually when | |||
// compiling for Go 1.1. | |||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported. (This file is compiled only when the
// go1.2 build tag is absent; see the build tag above.)
type TextMarshaler interface {
	MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
	UnmarshalText(text []byte) error
}
@ -0,0 +1,953 @@ | |||
package toml | |||
import ( | |||
"fmt" | |||
"strings" | |||
"unicode" | |||
"unicode/utf8" | |||
) | |||
// itemType identifies the kind of token emitted by the lexer.
type itemType int
const (
	itemError itemType = iota
	itemNIL // used in the parser to indicate no type
	itemEOF
	itemText
	itemString
	itemRawString
	itemMultilineString
	itemRawMultilineString
	itemBool
	itemInteger
	itemFloat
	itemDatetime
	itemArray // the start of an array
	itemArrayEnd
	itemTableStart
	itemTableEnd
	itemArrayTableStart
	itemArrayTableEnd
	itemKeyStart
	itemCommentStart
	itemInlineTableStart
	itemInlineTableEnd
)
// The significant runes of TOML syntax. Several share the same literal
// ('[' and ']' serve tables, array tables, and arrays) but are named
// separately for readability at the use sites.
const (
	eof = 0
	comma = ','
	tableStart = '['
	tableEnd = ']'
	arrayTableStart = '['
	arrayTableEnd = ']'
	tableSep = '.'
	keySep = '='
	arrayStart = '['
	arrayEnd = ']'
	commentStart = '#'
	stringStart = '"'
	stringEnd = '"'
	rawStringStart = '\''
	rawStringEnd = '\''
	inlineTableStart = '{'
	inlineTableEnd = '}'
)
// stateFn is one state of the lexer; it consumes input and returns the
// next state (nil terminates lexing).
type stateFn func(lx *lexer) stateFn
type lexer struct {
	input string // the text being lexed
	start int    // byte offset where the pending item begins
	pos   int    // current byte offset in input
	line  int    // 1-based line number of pos
	state stateFn
	items chan item
	// Allow for backing up up to three runes.
	// This is necessary because TOML contains 3-rune tokens (""" and ''').
	prevWidths [3]int
	nprev int // how many of prevWidths are in use
	// If we emit an eof, we can still back up, but it is not OK to call
	// next again.
	atEOF bool
	// A stack of state functions used to maintain context.
	// The idea is to reuse parts of the state machine in various places.
	// For example, values can appear at the top level or within arbitrarily
	// nested arrays. The last state on the stack is used after a value has
	// been lexed. Similarly for comments.
	stack []stateFn
}
// item is a single lexed token: its type, text, and source line.
type item struct {
	typ itemType
	val string
	line int
}
// nextItem runs the state machine until a token is available on the
// items channel, then returns it.
func (lx *lexer) nextItem() item {
	for {
		select {
		case item := <-lx.items:
			return item
		default:
			lx.state = lx.state(lx)
		}
	}
}
// lex creates a lexer for input, starting in the top-level state.
func lex(input string) *lexer {
	lx := &lexer{
		input: input,
		state: lexTop,
		line: 1,
		items: make(chan item, 10),
		stack: make([]stateFn, 0, 10),
	}
	return lx
}
// push saves state as the continuation to return to via pop.
func (lx *lexer) push(state stateFn) {
	lx.stack = append(lx.stack, state)
}
// pop removes and returns the most recently pushed state.
func (lx *lexer) pop() stateFn {
	if len(lx.stack) == 0 {
		return lx.errorf("BUG in lexer: no states to pop")
	}
	last := lx.stack[len(lx.stack)-1]
	lx.stack = lx.stack[0 : len(lx.stack)-1]
	return last
}
// current returns the text consumed so far for the pending item.
func (lx *lexer) current() string {
	return lx.input[lx.start:lx.pos]
}
// emit sends the pending text as an item of the given type and resets
// the item start position.
func (lx *lexer) emit(typ itemType) {
	lx.items <- item{typ, lx.current(), lx.line}
	lx.start = lx.pos
}
// emitTrim is like emit but trims surrounding whitespace from the text.
func (lx *lexer) emitTrim(typ itemType) {
	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
	lx.start = lx.pos
}
// next consumes and returns the next rune of input, returning eof at the
// end. It records the rune's width so backup can undo the read.
func (lx *lexer) next() (r rune) {
	if lx.atEOF {
		panic("next called after EOF")
	}
	if lx.pos >= len(lx.input) {
		lx.atEOF = true
		return eof
	}
	if lx.input[lx.pos] == '\n' {
		lx.line++
	}
	// Shift the width history so up to three runes can be backed up.
	lx.prevWidths[2] = lx.prevWidths[1]
	lx.prevWidths[1] = lx.prevWidths[0]
	if lx.nprev < 3 {
		lx.nprev++
	}
	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
	lx.prevWidths[0] = w
	lx.pos += w
	return r
}
// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
	lx.start = lx.pos
}
// backup steps back one rune. Can be called up to three times between
// calls to next (prevWidths records the widths of the last three runes).
func (lx *lexer) backup() {
	if lx.atEOF {
		lx.atEOF = false
		return
	}
	if lx.nprev < 1 {
		panic("backed up too far")
	}
	w := lx.prevWidths[0]
	lx.prevWidths[0] = lx.prevWidths[1]
	lx.prevWidths[1] = lx.prevWidths[2]
	lx.nprev--
	lx.pos -= w
	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
		lx.line--
	}
}
// accept consumes the next rune if it's equal to `valid`.
func (lx *lexer) accept(valid rune) bool {
	if lx.next() == valid {
		return true
	}
	lx.backup()
	return false
}
// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
	r := lx.next()
	lx.backup()
	return r
}
// skip ignores all input that matches the given predicate.
func (lx *lexer) skip(pred func(rune) bool) {
	for {
		r := lx.next()
		if pred(r) {
			continue
		}
		lx.backup()
		lx.ignore()
		return
	}
}
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.), provided the caller formats it
// with %q.
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
	lx.items <- item{
		itemError,
		fmt.Sprintf(format, values...),
		lx.line,
	}
	return nil
}
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
	r := lx.next()
	if isWhitespace(r) || isNL(r) {
		return lexSkip(lx, lexTop)
	}
	switch r {
	case commentStart:
		lx.push(lexTop)
		return lexCommentStart
	case tableStart:
		return lexTableStart
	case eof:
		if lx.pos > lx.start {
			return lx.errorf("unexpected EOF")
		}
		lx.emit(itemEOF)
		return nil
	}
	// At this point, the only valid item can be a key, so we back up
	// and let the key lexer do the rest.
	lx.backup()
	lx.push(lexTopEnd)
	return lexKeyStart
}
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == commentStart:
		// a comment will read to a newline for us.
		lx.push(lexTop)
		return lexCommentStart
	case isWhitespace(r):
		return lexTopEnd
	case isNL(r):
		lx.ignore()
		return lexTop
	case r == eof:
		lx.emit(itemEOF)
		return nil
	}
	return lx.errorf("expected a top-level item to end with a newline, "+
		"comment, or EOF, but got %q instead", r)
}
// lexTableStart lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
	if lx.peek() == arrayTableStart {
		// A second '[' means this is an array-of-tables header.
		lx.next()
		lx.emit(itemArrayTableStart)
		lx.push(lexArrayTableEnd)
	} else {
		lx.emit(itemTableStart)
		lx.push(lexTableEnd)
	}
	return lexTableNameStart
}
// lexTableEnd emits the table-end token once the name has been lexed.
func lexTableEnd(lx *lexer) stateFn {
	lx.emit(itemTableEnd)
	return lexTopEnd
}
// lexArrayTableEnd consumes the second ']' of an '[[name]]' header.
func lexArrayTableEnd(lx *lexer) stateFn {
	if r := lx.next(); r != arrayTableEnd {
		return lx.errorf("expected end of table array name delimiter %q, "+
			"but got %q instead", arrayTableEnd, r)
	}
	lx.emit(itemArrayTableEnd)
	return lexTopEnd
}
// lexTableNameStart begins lexing one dotted component of a table name,
// which may be bare or a (raw) quoted string.
func lexTableNameStart(lx *lexer) stateFn {
	lx.skip(isWhitespace)
	switch r := lx.peek(); {
	case r == tableEnd || r == eof:
		return lx.errorf("unexpected end of table name " +
			"(table names cannot be empty)")
	case r == tableSep:
		return lx.errorf("unexpected table separator " +
			"(table names cannot be empty)")
	case r == stringStart || r == rawStringStart:
		lx.ignore()
		lx.push(lexTableNameEnd)
		return lexValue // reuse string lexing
	default:
		return lexBareTableName
	}
}
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
	r := lx.next()
	if isBareKeyChar(r) {
		return lexBareTableName
	}
	lx.backup()
	lx.emit(itemText)
	return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
	lx.skip(isWhitespace)
	switch r := lx.next(); {
	case isWhitespace(r):
		return lexTableNameEnd
	case r == tableSep:
		// A '.' starts the next component of a dotted table name.
		lx.ignore()
		return lexTableNameStart
	case r == tableEnd:
		return lx.pop()
	default:
		return lx.errorf("expected '.' or ']' to end table name, "+
			"but got %q instead", r)
	}
}
// lexKeyStart consumes a key name up until the first non-whitespace character.
// lexKeyStart will ignore whitespace.
func lexKeyStart(lx *lexer) stateFn {
	r := lx.peek()
	switch {
	case r == keySep:
		return lx.errorf("unexpected key separator %q", keySep)
	case isWhitespace(r) || isNL(r):
		lx.next()
		return lexSkip(lx, lexKeyStart)
	case r == stringStart || r == rawStringStart:
		lx.ignore()
		lx.emit(itemKeyStart)
		lx.push(lexKeyEnd)
		return lexValue // reuse string lexing
	default:
		lx.ignore()
		lx.emit(itemKeyStart)
		return lexBareKey
	}
}
// lexBareKey consumes the text of a bare key. Assumes that the first character | |||
// (which is not whitespace) has not yet been consumed. | |||
func lexBareKey(lx *lexer) stateFn { | |||
switch r := lx.next(); { | |||
case isBareKeyChar(r): | |||
return lexBareKey | |||
case isWhitespace(r): | |||
lx.backup() | |||
lx.emit(itemText) | |||
return lexKeyEnd | |||
case r == keySep: | |||
lx.backup() | |||
lx.emit(itemText) | |||
return lexKeyEnd | |||
default: | |||
return lx.errorf("bare keys cannot contain %q", r) | |||
} | |||
} | |||
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
	switch r := lx.next(); {
	case r == keySep:
		return lexSkip(lx, lexValue)
	case isWhitespace(r):
		return lexSkip(lx, lexKeyEnd)
	default:
		return lx.errorf("expected key separator %q, but got %q instead",
			keySep, r)
	}
}
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
	// We allow whitespace to precede a value, but NOT newlines.
	// In array syntax, the array states are responsible for ignoring newlines.
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexValue)
	case isDigit(r):
		lx.backup() // avoid an extra state and use the same as above
		return lexNumberOrDateStart
	}
	switch r {
	case arrayStart:
		lx.ignore()
		lx.emit(itemArray)
		return lexArrayValue
	case inlineTableStart:
		lx.ignore()
		lx.emit(itemInlineTableStart)
		return lexInlineTableValue
	case stringStart:
		// One, two, or three '"' runes distinguish basic strings from
		// multiline basic strings.
		if lx.accept(stringStart) {
			if lx.accept(stringStart) {
				lx.ignore() // Ignore """
				return lexMultilineString
			}
			lx.backup()
		}
		lx.ignore() // ignore the '"'
		return lexString
	case rawStringStart:
		if lx.accept(rawStringStart) {
			if lx.accept(rawStringStart) {
				lx.ignore() // Ignore '''
				return lexMultilineRawString
			}
			lx.backup()
		}
		lx.ignore() // ignore the "'"
		return lexRawString
	case '+', '-':
		return lexNumberStart
	case '.': // special error case, be kind to users
		return lx.errorf("floats must start with a digit, not '.'")
	}
	if unicode.IsLetter(r) {
		// Be permissive here; lexBool will give a nice error if the
		// user wrote something like
		// x = foo
		// (i.e. not 'true' or 'false' but is something else word-like.)
		lx.backup()
		return lexBool
	}
	return lx.errorf("expected value but found %q instead", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValue)
	case r == commentStart:
		lx.push(lexArrayValue)
		return lexCommentStart
	case r == comma:
		return lx.errorf("unexpected comma")
	case r == arrayEnd:
		// NOTE(caleb): The spec isn't clear about whether you can have
		// a trailing comma or not, so we'll allow it.
		return lexArrayEnd
	}
	lx.backup()
	lx.push(lexArrayValueEnd)
	return lexValue
}
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValueEnd)
	case r == commentStart:
		lx.push(lexArrayValueEnd)
		return lexCommentStart
	case r == comma:
		lx.ignore()
		return lexArrayValue // move on to the next value
	case r == arrayEnd:
		return lexArrayEnd
	}
	return lx.errorf(
		"expected a comma or array terminator %q, but got %q instead",
		arrayEnd, r,
	)
}
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemArrayEnd)
	return lx.pop()
}
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValue)
	case isNL(r):
		return lx.errorf("newlines not allowed within inline tables")
	case r == commentStart:
		lx.push(lexInlineTableValue)
		return lexCommentStart
	case r == comma:
		return lx.errorf("unexpected comma")
	case r == inlineTableEnd:
		return lexInlineTableEnd
	}
	lx.backup()
	lx.push(lexInlineTableValueEnd)
	return lexKeyStart
}
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValueEnd)
	case isNL(r):
		return lx.errorf("newlines not allowed within inline tables")
	case r == commentStart:
		lx.push(lexInlineTableValueEnd)
		return lexCommentStart
	case r == comma:
		lx.ignore()
		return lexInlineTableValue
	case r == inlineTableEnd:
		return lexInlineTableEnd
	}
	return lx.errorf("expected a comma or an inline table terminator %q, "+
		"but got %q instead", inlineTableEnd, r)
}
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemInlineTableEnd)
	return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		return lx.errorf("strings cannot contain newlines")
	case r == '\\':
		lx.push(lexString)
		return lexStringEscape
	case r == stringEnd:
		// Emit the contents without the closing quote, then consume
		// and discard the quote itself.
		lx.backup()
		lx.emit(itemString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexString
}
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case '\\':
		return lexMultilineStringEscape
	case stringEnd:
		// Only three consecutive '"' runes terminate the string;
		// back up over them to emit the contents, then re-consume
		// and discard the delimiter.
		if lx.accept(stringEnd) {
			if lx.accept(stringEnd) {
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			lx.backup()
		}
	}
	return lexMultilineString
}
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		return lx.errorf("strings cannot contain newlines")
	case r == rawStringEnd:
		// Emit the contents without the closing quote, then consume
		// and discard the quote itself.
		lx.backup()
		lx.emit(itemRawString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexRawString
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case rawStringEnd:
		// Only three consecutive "'" runes terminate the string; see
		// lexMultilineString for the same back-up/re-consume dance.
		if lx.accept(rawStringEnd) {
			if lx.accept(rawStringEnd) {
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemRawMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			lx.backup()
		}
	}
	return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
	// Handle the special case first:
	// a backslash followed by a newline (line-ending backslash) is
	// consumed without producing an escape.
	if isNL(lx.next()) {
		return lexMultilineString
	}
	lx.backup()
	lx.push(lexMultilineString)
	return lexStringEscape(lx)
}
func lexStringEscape(lx *lexer) stateFn { | |||
r := lx.next() | |||
switch r { | |||
case 'b': | |||
fallthrough | |||
case 't': | |||
fallthrough | |||
case 'n': | |||
fallthrough | |||
case 'f': | |||
fallthrough | |||
case 'r': | |||
fallthrough | |||
case '"': | |||
fallthrough | |||
case '\\': | |||
return lx.pop() | |||
case 'u': | |||
return lexShortUnicodeEscape | |||
case 'U': | |||
return lexLongUnicodeEscape | |||
} | |||
return lx.errorf("invalid escape character %q; only the following "+ | |||
"escape characters are allowed: "+ | |||
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) | |||
} | |||
func lexShortUnicodeEscape(lx *lexer) stateFn { | |||
var r rune | |||
for i := 0; i < 4; i++ { | |||
r = lx.next() | |||
if !isHexadecimal(r) { | |||
return lx.errorf(`expected four hexadecimal digits after '\u', `+ | |||
"but got %q instead", lx.current()) | |||
} | |||
} | |||
return lx.pop() | |||
} | |||
func lexLongUnicodeEscape(lx *lexer) stateFn { | |||
var r rune | |||
for i := 0; i < 8; i++ { | |||
r = lx.next() | |||
if !isHexadecimal(r) { | |||
return lx.errorf(`expected eight hexadecimal digits after '\U', `+ | |||
"but got %q instead", lx.current()) | |||
} | |||
} | |||
return lx.pop() | |||
} | |||
// lexNumberOrDateStart consumes either an integer, a float, or datetime. | |||
func lexNumberOrDateStart(lx *lexer) stateFn { | |||
r := lx.next() | |||
if isDigit(r) { | |||
return lexNumberOrDate | |||
} | |||
switch r { | |||
case '_': | |||
return lexNumber | |||
case 'e', 'E': | |||
return lexFloat | |||
case '.': | |||
return lx.errorf("floats must start with a digit, not '.'") | |||
} | |||
return lx.errorf("expected a digit but got %q", r) | |||
} | |||
// lexNumberOrDate consumes either an integer, float or datetime. | |||
func lexNumberOrDate(lx *lexer) stateFn { | |||
r := lx.next() | |||
if isDigit(r) { | |||
return lexNumberOrDate | |||
} | |||
switch r { | |||
case '-': | |||
return lexDatetime | |||
case '_': | |||
return lexNumber | |||
case '.', 'e', 'E': | |||
return lexFloat | |||
} | |||
lx.backup() | |||
lx.emit(itemInteger) | |||
return lx.pop() | |||
} | |||
// lexDatetime consumes a Datetime, to a first approximation. | |||
// The parser validates that it matches one of the accepted formats. | |||
func lexDatetime(lx *lexer) stateFn { | |||
r := lx.next() | |||
if isDigit(r) { | |||
return lexDatetime | |||
} | |||
switch r { | |||
case '-', 'T', ':', '.', 'Z', '+': | |||
return lexDatetime | |||
} | |||
lx.backup() | |||
lx.emit(itemDatetime) | |||
return lx.pop() | |||
} | |||
// lexNumberStart consumes either an integer or a float. It assumes that a sign | |||
// has already been read, but that *no* digits have been consumed. | |||
// lexNumberStart will move to the appropriate integer or float states. | |||
func lexNumberStart(lx *lexer) stateFn { | |||
// We MUST see a digit. Even floats have to start with a digit. | |||
r := lx.next() | |||
if !isDigit(r) { | |||
if r == '.' { | |||
return lx.errorf("floats must start with a digit, not '.'") | |||
} | |||
return lx.errorf("expected a digit but got %q", r) | |||
} | |||
return lexNumber | |||
} | |||
// lexNumber consumes an integer or a float after seeing the first digit. | |||
func lexNumber(lx *lexer) stateFn { | |||
r := lx.next() | |||
if isDigit(r) { | |||
return lexNumber | |||
} | |||
switch r { | |||
case '_': | |||
return lexNumber | |||
case '.', 'e', 'E': | |||
return lexFloat | |||
} | |||
lx.backup() | |||
lx.emit(itemInteger) | |||
return lx.pop() | |||
} | |||
// lexFloat consumes the elements of a float. It allows any sequence of | |||
// float-like characters, so floats emitted by the lexer are only a first | |||
// approximation and must be validated by the parser. | |||
func lexFloat(lx *lexer) stateFn { | |||
r := lx.next() | |||
if isDigit(r) { | |||
return lexFloat | |||
} | |||
switch r { | |||
case '_', '.', '-', '+', 'e', 'E': | |||
return lexFloat | |||
} | |||
lx.backup() | |||
lx.emit(itemFloat) | |||
return lx.pop() | |||
} | |||
// lexBool consumes a bool string: 'true' or 'false. | |||
func lexBool(lx *lexer) stateFn { | |||
var rs []rune | |||
for { | |||
r := lx.next() | |||
if !unicode.IsLetter(r) { | |||
lx.backup() | |||
break | |||
} | |||
rs = append(rs, r) | |||
} | |||
s := string(rs) | |||
switch s { | |||
case "true", "false": | |||
lx.emit(itemBool) | |||
return lx.pop() | |||
} | |||
return lx.errorf("expected value but found %q instead", s) | |||
} | |||
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
	// Discard the '#' marker itself before emitting, so the comment item
	// contains only the comment text.
	lx.ignore()
	lx.emit(itemCommentStart)
	return lexComment
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed. | |||
// It will consume *up to* the first newline character, and pass control | |||
// back to the last state on the stack. | |||
func lexComment(lx *lexer) stateFn { | |||
r := lx.peek() | |||
if isNL(r) || r == eof { | |||
lx.emit(itemText) | |||
return lx.pop() | |||
} | |||
lx.next() | |||
return lexComment | |||
} | |||
// lexSkip ignores all slurped input and moves on to the next state. | |||
func lexSkip(lx *lexer, nextState stateFn) stateFn { | |||
return func(lx *lexer) stateFn { | |||
lx.ignore() | |||
return nextState | |||
} | |||
} | |||
// isWhitespace returns true if `r` is a whitespace character according
// to the spec (space or tab only).
func isWhitespace(r rune) bool {
	return r == ' ' || r == '\t'
}
// isNL reports whether r is a newline character (LF or CR).
func isNL(r rune) bool {
	switch r {
	case '\n', '\r':
		return true
	}
	return false
}
// isDigit reports whether r is an ASCII decimal digit.
func isDigit(r rune) bool {
	return '0' <= r && r <= '9'
}
// isHexadecimal reports whether r is an ASCII hex digit (either case).
func isHexadecimal(r rune) bool {
	switch {
	case '0' <= r && r <= '9',
		'a' <= r && r <= 'f',
		'A' <= r && r <= 'F':
		return true
	}
	return false
}
// isBareKeyChar reports whether r may appear in an unquoted (bare) TOML
// key: ASCII letters, digits, underscore, or dash.
func isBareKeyChar(r rune) bool {
	switch {
	case 'A' <= r && r <= 'Z',
		'a' <= r && r <= 'z',
		'0' <= r && r <= '9',
		r == '_',
		r == '-':
		return true
	}
	return false
}
// String returns a human-readable name for the item type, for debugging.
// All four string item kinds render as "String".
//
// NOTE(review): any item type without a case here (e.g. array-table or
// inline-table markers, if they ever reach this method) hits the panic
// below — confirm callers never format those types.
func (itype itemType) String() string {
	switch itype {
	case itemError:
		return "Error"
	case itemNIL:
		return "NIL"
	case itemEOF:
		return "EOF"
	case itemText:
		return "Text"
	case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
		return "String"
	case itemBool:
		return "Bool"
	case itemInteger:
		return "Integer"
	case itemFloat:
		return "Float"
	case itemDatetime:
		return "DateTime"
	case itemTableStart:
		return "TableStart"
	case itemTableEnd:
		return "TableEnd"
	case itemKeyStart:
		return "KeyStart"
	case itemArray:
		return "Array"
	case itemArrayEnd:
		return "ArrayEnd"
	case itemCommentStart:
		return "CommentStart"
	}
	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
func (item item) String() string { | |||
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) | |||
} |
@ -0,0 +1,592 @@ | |||
package toml | |||
import ( | |||
"fmt" | |||
"strconv" | |||
"strings" | |||
"time" | |||
"unicode" | |||
"unicode/utf8" | |||
) | |||
// parser consumes items from the lexer and builds the key/value mapping
// plus per-key type information.
type parser struct {
	mapping map[string]interface{} // parsed key → value tree (nested maps/slices)
	types   map[string]tomlType    // dotted key string → TOML type
	lx      *lexer                 // token source
	// A list of keys in the order that they appear in the TOML data.
	ordered []Key
	// the full key for the current hash in scope
	context Key
	// the base key name for everything except hashes
	currentKey string
	// rough approximation of line number
	approxLine int
	// A map of 'key.group.names' to whether they were created implicitly.
	implicits map[string]bool
}
// parseError is a string-backed error type; the parser panics with one of
// these to unwind, and parse recovers it into an ordinary error.
type parseError string

// Error implements the error interface.
func (e parseError) Error() string {
	return string(e)
}
// parse lexes and parses data, returning the populated parser or the first
// error encountered.
func parse(data string) (p *parser, err error) {
	// The parser reports errors by panicking with a parseError (see
	// panicf); convert that back into an ordinary error here. Any other
	// panic value is a genuine bug and is re-raised.
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(parseError); ok {
				return
			}
			panic(r)
		}
	}()
	p = &parser{
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]bool),
	}
	// Drive the lexer to EOF, handling one top-level item at a time.
	for {
		item := p.next()
		if item.typ == itemEOF {
			break
		}
		p.topLevel(item)
	}
	return p, nil
}
func (p *parser) panicf(format string, v ...interface{}) { | |||
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", | |||
p.approxLine, p.current(), fmt.Sprintf(format, v...)) | |||
panic(parseError(msg)) | |||
} | |||
func (p *parser) next() item { | |||
it := p.lx.nextItem() | |||
if it.typ == itemError { | |||
p.panicf("%s", it.val) | |||
} | |||
return it | |||
} | |||
// bug panics with a "BUG:"-prefixed message. It is used for conditions
// that indicate a defect in this package (e.g. the lexer violating its
// contract) rather than invalid user input.
func (p *parser) bug(format string, v ...interface{}) {
	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
func (p *parser) expect(typ itemType) item { | |||
it := p.next() | |||
p.assertEqual(typ, it.typ) | |||
return it | |||
} | |||
func (p *parser) assertEqual(expected, got itemType) { | |||
if expected != got { | |||
p.bug("Expected '%s' but got '%s'.", expected, got) | |||
} | |||
} | |||
// topLevel handles one top-level item: a comment, a [table] header, an
// [[array-of-tables]] header, or a key/value pair. Anything else is a bug.
func (p *parser) topLevel(item item) {
	switch item.typ {
	case itemCommentStart:
		// Comments carry no data; just consume the text item.
		p.approxLine = item.line
		p.expect(itemText)
	case itemTableStart:
		// [a.b.c]: collect the dotted name parts up to the closing bracket,
		// then make that table the current context.
		kg := p.next()
		p.approxLine = kg.line

		var key Key
		for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemTableEnd, kg.typ)

		p.establishContext(key, false)
		p.setType("", tomlHash)
		p.ordered = append(p.ordered, key)
	case itemArrayTableStart:
		// [[a.b.c]]: same as above, but appends a new table to an array.
		kg := p.next()
		p.approxLine = kg.line

		var key Key
		for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemArrayTableEnd, kg.typ)

		p.establishContext(key, true)
		p.setType("", tomlArrayHash)
		p.ordered = append(p.ordered, key)
	case itemKeyStart:
		// key = value: parse the value and record it under the current
		// context. currentKey is cleared afterwards so error messages for
		// subsequent items don't blame this key.
		kname := p.next()
		p.approxLine = kname.line
		p.currentKey = p.keyString(kname)

		val, typ := p.value(p.next())
		p.setValue(p.currentKey, val)
		p.setType(p.currentKey, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		p.currentKey = ""
	default:
		p.bug("Unexpected type at top level: %s", item.typ)
	}
}
// Gets a string for a key (or part of a key in a table name). | |||
func (p *parser) keyString(it item) string { | |||
switch it.typ { | |||
case itemText: | |||
return it.val | |||
case itemString, itemMultilineString, | |||
itemRawString, itemRawMultilineString: | |||
s, _ := p.value(it) | |||
return s.(string) | |||
default: | |||
p.bug("Unexpected key type: %s", it.typ) | |||
panic("unreachable") | |||
} | |||
} | |||
// value translates an expected value from the lexer into a Go value wrapped | |||
// as an empty interface. | |||
func (p *parser) value(it item) (interface{}, tomlType) { | |||
switch it.typ { | |||
case itemString: | |||
return p.replaceEscapes(it.val), p.typeOfPrimitive(it) | |||
case itemMultilineString: | |||
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) | |||
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) | |||
case itemRawString: | |||
return it.val, p.typeOfPrimitive(it) | |||
case itemRawMultilineString: | |||
return stripFirstNewline(it.val), p.typeOfPrimitive(it) | |||
case itemBool: | |||
switch it.val { | |||
case "true": | |||
return true, p.typeOfPrimitive(it) | |||
case "false": | |||
return false, p.typeOfPrimitive(it) | |||
} | |||
p.bug("Expected boolean value, but got '%s'.", it.val) | |||
case itemInteger: | |||
if !numUnderscoresOK(it.val) { | |||
p.panicf("Invalid integer %q: underscores must be surrounded by digits", | |||
it.val) | |||
} | |||
val := strings.Replace(it.val, "_", "", -1) | |||
num, err := strconv.ParseInt(val, 10, 64) | |||
if err != nil { | |||
// Distinguish integer values. Normally, it'd be a bug if the lexer | |||
// provides an invalid integer, but it's possible that the number is | |||
// out of range of valid values (which the lexer cannot determine). | |||
// So mark the former as a bug but the latter as a legitimate user | |||
// error. | |||
if e, ok := err.(*strconv.NumError); ok && | |||
e.Err == strconv.ErrRange { | |||
p.panicf("Integer '%s' is out of the range of 64-bit "+ | |||
"signed integers.", it.val) | |||
} else { | |||
p.bug("Expected integer value, but got '%s'.", it.val) | |||
} | |||
} | |||
return num, p.typeOfPrimitive(it) | |||
case itemFloat: | |||
parts := strings.FieldsFunc(it.val, func(r rune) bool { | |||
switch r { | |||
case '.', 'e', 'E': | |||
return true | |||
} | |||
return false | |||
}) | |||
for _, part := range parts { | |||
if !numUnderscoresOK(part) { | |||
p.panicf("Invalid float %q: underscores must be "+ | |||
"surrounded by digits", it.val) | |||
} | |||
} | |||
if !numPeriodsOK(it.val) { | |||
// As a special case, numbers like '123.' or '1.e2', | |||
// which are valid as far as Go/strconv are concerned, | |||
// must be rejected because TOML says that a fractional | |||
// part consists of '.' followed by 1+ digits. | |||
p.panicf("Invalid float %q: '.' must be followed "+ | |||
"by one or more digits", it.val) | |||
} | |||
val := strings.Replace(it.val, "_", "", -1) | |||
num, err := strconv.ParseFloat(val, 64) | |||
if err != nil { | |||
if e, ok := err.(*strconv.NumError); ok && | |||
e.Err == strconv.ErrRange { | |||
p.panicf("Float '%s' is out of the range of 64-bit "+ | |||
"IEEE-754 floating-point numbers.", it.val) | |||
} else { | |||
p.panicf("Invalid float value: %q", it.val) | |||
} | |||
} | |||
return num, p.typeOfPrimitive(it) | |||
case itemDatetime: | |||
var t time.Time | |||
var ok bool | |||
var err error | |||
for _, format := range []string{ | |||
"2006-01-02T15:04:05Z07:00", | |||
"2006-01-02T15:04:05", | |||
"2006-01-02", | |||
} { | |||
t, err = time.ParseInLocation(format, it.val, time.Local) | |||
if err == nil { | |||
ok = true | |||
break | |||
} | |||
} | |||
if !ok { | |||
p.panicf("Invalid TOML Datetime: %q.", it.val) | |||
} | |||
return t, p.typeOfPrimitive(it) | |||
case itemArray: | |||
array := make([]interface{}, 0) | |||
types := make([]tomlType, 0) | |||
for it = p.next(); it.typ != itemArrayEnd; it = p.next() { | |||
if it.typ == itemCommentStart { | |||
p.expect(itemText) | |||
continue | |||
} | |||
val, typ := p.value(it) | |||
array = append(array, val) | |||
types = append(types, typ) | |||
} | |||
return array, p.typeOfArray(types) | |||
case itemInlineTableStart: | |||
var ( | |||
hash = make(map[string]interface{}) | |||
outerContext = p.context | |||
outerKey = p.currentKey | |||
) | |||
p.context = append(p.context, p.currentKey) | |||
p.currentKey = "" | |||
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { | |||
if it.typ != itemKeyStart { | |||
p.bug("Expected key start but instead found %q, around line %d", | |||
it.val, p.approxLine) | |||
} | |||
if it.typ == itemCommentStart { | |||
p.expect(itemText) | |||
continue | |||
} | |||
// retrieve key | |||
k := p.next() | |||
p.approxLine = k.line | |||
kname := p.keyString(k) | |||
// retrieve value | |||
p.currentKey = kname | |||
val, typ := p.value(p.next()) | |||
// make sure we keep metadata up to date | |||
p.setType(kname, typ) | |||
p.ordered = append(p.ordered, p.context.add(p.currentKey)) | |||
hash[kname] = val | |||
} | |||
p.context = outerContext | |||
p.currentKey = outerKey | |||
return hash, tomlHash | |||
} | |||
p.bug("Unexpected value type: %s", it.typ) | |||
panic("unreachable") | |||
} | |||
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores (i.e. no leading, trailing, or
// doubled underscores). Note that the empty string is rejected.
func numUnderscoresOK(s string) bool {
	prevOK := false
	for _, r := range s {
		if r != '_' {
			prevOK = true
			continue
		}
		if !prevOK {
			return false
		}
		prevOK = false
	}
	return prevOK
}
// numPeriodsOK checks whether every period in s is followed by a digit. | |||
func numPeriodsOK(s string) bool { | |||
period := false | |||
for _, r := range s { | |||
if period && !isDigit(r) { | |||
return false | |||
} | |||
period = r == '.' | |||
} | |||
return !period | |||
} | |||
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
//
// NOTE(review): assumes len(key) >= 1; key[len(key)-1] below would panic on
// an empty key — confirm topLevel never passes one.
func (p *parser) establishContext(key Key, array bool) {
	var ok bool
	// Always start at the top level and drill down for our context.
	hashContext := p.mapping
	keyContext := make(Key, 0)
	// We only need implicit hashes for key[0:-1]
	for _, k := range key[0 : len(key)-1] {
		_, ok = hashContext[k]
		keyContext = append(keyContext, k)
		// No key? Make an implicit hash and move on.
		if !ok {
			p.addImplicit(keyContext)
			hashContext[k] = make(map[string]interface{})
		}
		// If the hash context is actually an array of tables, then set
		// the hash context to the last element in that array.
		//
		// Otherwise, it better be a table, since this MUST be a key group (by
		// virtue of it not being the last element in a key).
		switch t := hashContext[k].(type) {
		case []map[string]interface{}:
			hashContext = t[len(t)-1]
		case map[string]interface{}:
			hashContext = t
		default:
			p.panicf("Key '%s' was already created as a hash.", keyContext)
		}
	}
	p.context = keyContext
	if array {
		// If this is the first element for this array, then allocate a new
		// list of tables for it.
		k := key[len(key)-1]
		if _, ok := hashContext[k]; !ok {
			hashContext[k] = make([]map[string]interface{}, 0, 5)
		}
		// Add a new table. But make sure the key hasn't already been used
		// for something else.
		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
			hashContext[k] = append(hash, make(map[string]interface{}))
		} else {
			p.panicf("Key '%s' was already created and cannot be used as "+
				"an array.", keyContext)
		}
	} else {
		// Plain table: create (or re-acknowledge) the hash for the last
		// key part; setValue handles duplicate detection.
		p.setValue(key[len(key)-1], make(map[string]interface{}))
	}
	// The last key part becomes part of the current context too.
	p.context = append(p.context, key[len(key)-1])
}
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
	var tmpHash interface{}
	var ok bool

	// Walk down the context chain to find the hash this key lives in.
	hash := p.mapping
	keyContext := make(Key, 0)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
		if tmpHash, ok = hash[k]; !ok {
			p.bug("Context for key '%s' has not been established.", keyContext)
		}
		switch t := tmpHash.(type) {
		case []map[string]interface{}:
			// The context is a table of hashes. Pick the most recent table
			// defined as the current hash.
			hash = t[len(t)-1]
		case map[string]interface{}:
			hash = t
		default:
			p.bug("Expected hash to have type 'map[string]interface{}', but "+
				"it has '%T' instead.", tmpHash)
		}
	}
	keyContext = append(keyContext, key)
	if _, ok := hash[key]; ok {
		// Typically, if the given key has already been set, then we have
		// to raise an error since duplicate keys are disallowed. However,
		// it's possible that a key was previously defined implicitly. In this
		// case, it is allowed to be redefined concretely. (See the
		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
		//
		// But we have to make sure to stop marking it as an implicit. (So that
		// another redefinition provokes an error.)
		//
		// Note that since it has already been defined (as a hash), we don't
		// want to overwrite it. So our business is done.
		if p.isImplicit(keyContext) {
			p.removeImplicit(keyContext)
			return
		}
		// Otherwise, we have a concrete key trying to override a previous
		// key, which is *always* wrong.
		p.panicf("Key '%s' has already been defined.", keyContext)
	}
	hash[key] = value
}
// setType sets the type of a particular value at a given key. | |||
// It should be called immediately AFTER setValue. | |||
// | |||
// Note that if `key` is empty, then the type given will be applied to the | |||
// current context (which is either a table or an array of tables). | |||
func (p *parser) setType(key string, typ tomlType) { | |||
keyContext := make(Key, 0, len(p.context)+1) | |||
for _, k := range p.context { | |||
keyContext = append(keyContext, k) | |||
} | |||
if len(key) > 0 { // allow type setting for hashes | |||
keyContext = append(keyContext, key) | |||
} | |||
p.types[keyContext.String()] = typ | |||
} | |||
// addImplicit sets the given Key as having been created implicitly.
// Keys are tracked by their dotted-string form.
func (p *parser) addImplicit(key Key) {
	p.implicits[key.String()] = true
}
// removeImplicit stops tagging the given key as having been implicitly | |||
// created. | |||
func (p *parser) removeImplicit(key Key) { | |||
p.implicits[key.String()] = false | |||
} | |||
// isImplicit returns true if the key group pointed to by the key was created
// implicitly. Keys never recorded report false (the map's zero value).
func (p *parser) isImplicit(key Key) bool {
	return p.implicits[key.String()]
}
// current returns the full key name of the current context. | |||
func (p *parser) current() string { | |||
if len(p.currentKey) == 0 { | |||
return p.context.String() | |||
} | |||
if len(p.context) == 0 { | |||
return p.currentKey | |||
} | |||
return fmt.Sprintf("%s.%s", p.context, p.currentKey) | |||
} | |||
// stripFirstNewline removes a single leading newline from s, if present.
// TOML trims a newline immediately following the opening delimiter of a
// multi-line string, and that newline may be LF or CRLF; previously only
// a bare LF was handled, leaving a stray '\r' for CRLF input.
func stripFirstNewline(s string) string {
	if strings.HasPrefix(s, "\r\n") {
		return s[2:]
	}
	if strings.HasPrefix(s, "\n") {
		return s[1:]
	}
	return s
}
// stripEscapedWhitespace implements the line-continuation backslash of
// multi-line basic strings: after each literal backslash-newline, all
// leading whitespace (including further newlines) is removed.
func stripEscapedWhitespace(s string) string {
	parts := strings.Split(s, "\\\n")
	for i := 1; i < len(parts); i++ {
		parts[i] = strings.TrimLeftFunc(parts[i], unicode.IsSpace)
	}
	return strings.Join(parts, "")
}
// replaceEscapes returns str with every backslash escape sequence replaced
// by the character it denotes. The lexer has already validated the escapes,
// so encountering an unknown one here is a bug.
func (p *parser) replaceEscapes(str string) string {
	var replaced []rune
	s := []byte(str)
	r := 0 // byte index into s
	for r < len(s) {
		if s[r] != '\\' {
			// Ordinary character: decode one rune and copy it through.
			c, size := utf8.DecodeRune(s[r:])
			r += size
			replaced = append(replaced, c)
			continue
		}
		r += 1
		if r >= len(s) {
			p.bug("Escape sequence at end of string.")
			return ""
		}
		switch s[r] {
		default:
			p.bug("Expected valid escape code after \\, but got %q.", s[r])
			return ""
		case 'b':
			replaced = append(replaced, rune(0x0008))
			r += 1
		case 't':
			replaced = append(replaced, rune(0x0009))
			r += 1
		case 'n':
			replaced = append(replaced, rune(0x000A))
			r += 1
		case 'f':
			replaced = append(replaced, rune(0x000C))
			r += 1
		case 'r':
			replaced = append(replaced, rune(0x000D))
			r += 1
		case '"':
			replaced = append(replaced, rune(0x0022))
			r += 1
		case '\\':
			replaced = append(replaced, rune(0x005C))
			r += 1
		case 'u':
			// At this point, we know we have a Unicode escape of the form
			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
			replaced = append(replaced, escaped)
			r += 5
		case 'U':
			// At this point, we know we have a Unicode escape of the form
			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
			replaced = append(replaced, escaped)
			r += 9
		}
	}
	return string(replaced)
}
func (p *parser) asciiEscapeToUnicode(bs []byte) rune { | |||
s := string(bs) | |||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) | |||
if err != nil { | |||
p.bug("Could not parse '%s' as a hexadecimal number, but the "+ | |||
"lexer claims it's OK: %s", s, err) | |||
} | |||
if !utf8.ValidRune(rune(hex)) { | |||
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) | |||
} | |||
return rune(hex) | |||
} | |||
func isStringType(ty itemType) bool { | |||
return ty == itemString || ty == itemMultilineString || | |||
ty == itemRawString || ty == itemRawMultilineString | |||
} |
@ -0,0 +1,91 @@ | |||
package toml | |||
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be militating
// toward adding real composite types.
type tomlType interface {
	// typeString returns the name of the TOML type; it is used both for
	// display and for equality comparison (see typeEqual).
	typeString() string
}
// typeEqual accepts any two types and returns true if they are equal. | |||
func typeEqual(t1, t2 tomlType) bool { | |||
if t1 == nil || t2 == nil { | |||
return false | |||
} | |||
return t1.typeString() == t2.typeString() | |||
} | |||
// typeIsHash reports whether t is a table ("Hash") or an array of tables
// ("ArrayHash").
func typeIsHash(t tomlType) bool {
	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
// tomlBaseType is a string-backed tomlType implementation used for all of
// TOML's built-in type names.
type tomlBaseType string

// typeString returns the type's name, satisfying tomlType.
func (t tomlBaseType) typeString() string {
	return string(t)
}

// String makes tomlBaseType satisfy fmt.Stringer.
func (t tomlBaseType) String() string {
	return t.typeString()
}
// The TOML base types: one canonical value per type name the parser can
// produce. Compared via typeEqual, never with ==.
var (
	tomlInteger tomlBaseType = "Integer"
	tomlFloat tomlBaseType = "Float"
	tomlDatetime tomlBaseType = "Datetime"
	tomlString tomlBaseType = "String"
	tomlBool tomlBaseType = "Bool"
	tomlArray tomlBaseType = "Array"
	tomlHash tomlBaseType = "Hash"
	tomlArrayHash tomlBaseType = "ArrayHash"
)
// typeOfPrimitive returns a tomlType of any primitive value in TOML. | |||
// Primitive values are: Integer, Float, Datetime, String and Bool. | |||
// | |||
// Passing a lexer item other than the following will cause a BUG message | |||
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. | |||
func (p *parser) typeOfPrimitive(lexItem item) tomlType { | |||
switch lexItem.typ { | |||
case itemInteger: | |||
return tomlInteger | |||
case itemFloat: | |||
return tomlFloat | |||
case itemDatetime: | |||
return tomlDatetime | |||
case itemString: | |||
return tomlString | |||
case itemMultilineString: | |||
return tomlString | |||
case itemRawString: | |||
return tomlString | |||
case itemRawMultilineString: | |||
return tomlString | |||
case itemBool: | |||
return tomlBool | |||
} | |||
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) | |||
panic("unreachable") | |||
} | |||
// typeOfArray returns a tomlType for an array given a list of types of its | |||
// values. | |||
// | |||
// In the current spec, if an array is homogeneous, then its type is always | |||
// "Array". If the array is not homogeneous, an error is generated. | |||
func (p *parser) typeOfArray(types []tomlType) tomlType { | |||
// Empty arrays are cool. | |||
if len(types) == 0 { | |||
return tomlArray | |||
} | |||
theType := types[0] | |||
for _, t := range types[1:] { | |||
if !typeEqual(theType, t) { | |||
p.panicf("Array contains values of type '%s' and '%s', but "+ | |||
"arrays must be homogeneous.", theType, t) | |||
} | |||
} | |||
return tomlArray | |||
} |
@ -0,0 +1,242 @@ | |||
package toml | |||
// Struct field handling is adapted from code in encoding/json: | |||
// | |||
// Copyright 2010 The Go Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style | |||
// license that can be found in the Go distribution. | |||
import ( | |||
"reflect" | |||
"sort" | |||
"sync" | |||
) | |||
// A field represents a single field found in a struct.
// It mirrors the field bookkeeping in encoding/json's struct handling.
type field struct {
	name string // the name of the field (`toml` tag included)
	tag bool // whether field has a `toml` tag
	index []int // represents the depth of an anonymous field
	typ reflect.Type // the type of the field
}
// byName sorts field by name, breaking ties with depth, | |||
// then breaking ties with "name came from toml tag", then | |||
// breaking ties with index sequence. | |||
type byName []field | |||
func (x byName) Len() int { return len(x) } | |||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } | |||
func (x byName) Less(i, j int) bool { | |||
if x[i].name != x[j].name { | |||
return x[i].name < x[j].name | |||
} | |||
if len(x[i].index) != len(x[j].index) { | |||
return len(x[i].index) < len(x[j].index) | |||
} | |||
if x[i].tag != x[j].tag { | |||
return x[i].tag | |||
} | |||
return byIndex(x).Less(i, j) | |||
} | |||
// byIndex sorts field by index sequence. | |||
type byIndex []field | |||
func (x byIndex) Len() int { return len(x) } | |||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } | |||
func (x byIndex) Less(i, j int) bool { | |||
for k, xik := range x[i].index { | |||
if k >= len(x[j].index) { | |||
return false | |||
} | |||
if xik != x[j].index[k] { | |||
return xik < x[j].index[k] | |||
} | |||
} | |||
return len(x[i].index) < len(x[j].index) | |||
} | |||
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}
	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}
	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}
	// Fields found.
	var fields []field
	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}
		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true
			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" && !sf.Anonymous { // unexported
					continue
				}
				opts := getOptions(sf.Tag)
				if opts.skip {
					continue
				}
				// Build the index path to this field from the root type.
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i
				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}
				// Record found field and index sequence.
				if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := opts.name != ""
					name := opts.name
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}
				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					f := field{name: ft.Name(), index: index, typ: ft}
					next = append(next, f)
				}
			}
		}
	}
	sort.Sort(byName(fields))
	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with TOML tags are promoted.
	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}
	fields = out
	sort.Sort(byIndex(fields))
	return fields
}
// dominantField looks through the fields, all of which are known to | |||
// have the same name, to find the single field that dominates the | |||
// others using Go's embedding rules, modified by the presence of | |||
// TOML tags. If there are multiple top-level fields, the boolean | |||
// will be false: This condition is an error in Go and we skip all | |||
// the fields. | |||
func dominantField(fields []field) (field, bool) { | |||
// The fields are sorted in increasing index-length order. The winner | |||
// must therefore be one with the shortest index length. Drop all | |||
// longer entries, which is easy: just truncate the slice. | |||
length := len(fields[0].index) | |||
tagged := -1 // Index of first tagged field. | |||
for i, f := range fields { | |||
if len(f.index) > length { | |||
fields = fields[:i] | |||
break | |||
} | |||
if f.tag { | |||
if tagged >= 0 { | |||
// Multiple tagged fields at the same level: conflict. | |||
// Return no field. | |||
return field{}, false | |||
} | |||
tagged = i | |||
} | |||
} | |||
if tagged >= 0 { | |||
return fields[tagged], true | |||
} | |||
// All remaining fields have the same length. If there's more than one, | |||
// we have a conflict (two fields named "X" at the same level) and we | |||
// return no field. | |||
if len(fields) > 1 { | |||
return field{}, false | |||
} | |||
return fields[0], true | |||
} | |||
// fieldCache memoizes typeFields results per struct type. The
// embedded RWMutex guards m so concurrent callers can share results.
var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. | |||
func cachedTypeFields(t reflect.Type) []field { | |||
fieldCache.RLock() | |||
f := fieldCache.m[t] | |||
fieldCache.RUnlock() | |||
if f != nil { | |||
return f | |||
} | |||
// Compute fields without lock. | |||
// Might duplicate effort but won't hold other computations back. | |||
f = typeFields(t) | |||
if f == nil { | |||
f = []field{} | |||
} | |||
fieldCache.Lock() | |||
if fieldCache.m == nil { | |||
fieldCache.m = map[reflect.Type][]field{} | |||
} | |||
fieldCache.m[t] = f | |||
fieldCache.Unlock() | |||
return f | |||
} |
@ -0,0 +1,19 @@ | |||
Copyright (c) 2013 Dustin Sallings | |||
Permission is hereby granted, free of charge, to any person obtaining a copy | |||
of this software and associated documentation files (the "Software"), to deal | |||
in the Software without restriction, including without limitation the rights | |||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||
copies of the Software, and to permit persons to whom the Software is | |||
furnished to do so, subject to the following conditions: | |||
The above copyright notice and this permission notice shall be included in | |||
all copies or substantial portions of the Software. | |||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |||
THE SOFTWARE. |
@ -0,0 +1,333 @@ | |||
package memcached | |||
import ( | |||
"bytes" | |||
"encoding/binary" | |||
"fmt" | |||
"io" | |||
"math" | |||
"github.com/couchbase/gomemcached" | |||
"github.com/couchbase/goutils/logging" | |||
) | |||
// TAP protocol docs: <http://www.couchbase.com/wiki/display/couchbase/TAP+Protocol> | |||
// TapOpcode is the tap operation type (found in TapEvent)
type TapOpcode uint8

// Tap opcode values.
const (
	TapBeginBackfill = TapOpcode(iota)
	TapEndBackfill
	TapMutation
	TapDeletion
	TapCheckpointStart
	TapCheckpointEnd
	tapEndStream // internal: server closed this TAP stream
)

// tapMutationExtraLen is the minimum extras length for a mutation or
// deletion packet to carry item flags (offset 8) and expiry (offset 12);
// see makeTapEvent.
const tapMutationExtraLen = 16
var tapOpcodeNames map[TapOpcode]string | |||
func init() { | |||
tapOpcodeNames = map[TapOpcode]string{ | |||
TapBeginBackfill: "BeginBackfill", | |||
TapEndBackfill: "EndBackfill", | |||
TapMutation: "Mutation", | |||
TapDeletion: "Deletion", | |||
TapCheckpointStart: "TapCheckpointStart", | |||
TapCheckpointEnd: "TapCheckpointEnd", | |||
tapEndStream: "EndStream", | |||
} | |||
} | |||
func (opcode TapOpcode) String() string { | |||
name := tapOpcodeNames[opcode] | |||
if name == "" { | |||
name = fmt.Sprintf("#%d", opcode) | |||
} | |||
return name | |||
} | |||
// TapEvent is a TAP notification of an operation on the server.
type TapEvent struct {
	Opcode     TapOpcode // Type of event
	VBucket    uint16    // VBucket this event applies to
	Flags      uint32    // Item flags
	Expiry     uint32    // Item expiration time
	Key, Value []byte    // Item key/value
	Cas        uint64    // CAS of the item (set for mutations and deletions)
}
// makeTapEvent translates one raw TAP request from the server into a
// *TapEvent. It returns nil for packets that carry no user-visible
// event: NOOPs, unknown opcodes, and most TAP_OPAQUE control messages.
func makeTapEvent(req gomemcached.MCRequest) *TapEvent {
	event := TapEvent{
		VBucket: req.VBucket,
	}
	switch req.Opcode {
	case gomemcached.TAP_MUTATION:
		event.Opcode = TapMutation
		event.Key = req.Key
		event.Value = req.Body
		event.Cas = req.Cas
	case gomemcached.TAP_DELETE:
		event.Opcode = TapDeletion
		event.Key = req.Key
		event.Cas = req.Cas
	case gomemcached.TAP_CHECKPOINT_START:
		event.Opcode = TapCheckpointStart
	case gomemcached.TAP_CHECKPOINT_END:
		event.Opcode = TapCheckpointEnd
	case gomemcached.TAP_OPAQUE:
		// The opaque sub-operation code is a big-endian uint32 at
		// extras offset 8; anything shorter is malformed and dropped.
		if len(req.Extras) < 8+4 {
			return nil
		}
		switch op := int(binary.BigEndian.Uint32(req.Extras[8:])); op {
		case gomemcached.TAP_OPAQUE_INITIAL_VBUCKET_STREAM:
			event.Opcode = TapBeginBackfill
		case gomemcached.TAP_OPAQUE_CLOSE_BACKFILL:
			event.Opcode = TapEndBackfill
		case gomemcached.TAP_OPAQUE_CLOSE_TAP_STREAM:
			event.Opcode = tapEndStream
		case gomemcached.TAP_OPAQUE_ENABLE_AUTO_NACK:
			return nil
		case gomemcached.TAP_OPAQUE_ENABLE_CHECKPOINT_SYNC:
			return nil
		default:
			logging.Infof("TapFeed: Ignoring TAP_OPAQUE/%d", op)
			return nil // unknown opaque event
		}
	case gomemcached.NOOP:
		return nil // ignore
	default:
		logging.Infof("TapFeed: Ignoring %s", req.Opcode)
		return nil // unknown event
	}
	// Mutations/deletions carry item flags at extras offset 8 and the
	// expiry at offset 12, when the extras are long enough.
	if len(req.Extras) >= tapMutationExtraLen &&
		(event.Opcode == TapMutation || event.Opcode == TapDeletion) {
		event.Flags = binary.BigEndian.Uint32(req.Extras[8:])
		event.Expiry = binary.BigEndian.Uint32(req.Extras[12:])
	}
	return &event
}
func (event TapEvent) String() string { | |||
switch event.Opcode { | |||
case TapBeginBackfill, TapEndBackfill, TapCheckpointStart, TapCheckpointEnd: | |||
return fmt.Sprintf("<TapEvent %s, vbucket=%d>", | |||
event.Opcode, event.VBucket) | |||
default: | |||
return fmt.Sprintf("<TapEvent %s, key=%q (%d bytes) flags=%x, exp=%d>", | |||
event.Opcode, event.Key, len(event.Value), | |||
event.Flags, event.Expiry) | |||
} | |||
} | |||
// TapArguments are parameters for requesting a TAP feed.
//
// Call DefaultTapArguments to get a default one. The fields are
// encoded onto the wire by flags (request extras) and bytes (request
// body); see StartTapFeed.
type TapArguments struct {
	// Timestamp of oldest item to send.
	//
	// Use TapNoBackfill to suppress all past items.
	Backfill uint64
	// If set, server will disconnect after sending existing items.
	Dump bool
	// The indices of the vbuckets to watch; empty/nil to watch all.
	VBuckets []uint16
	// Transfers ownership of vbuckets during cluster rebalance.
	Takeover bool
	// If true, server will wait for client ACK after every notification.
	SupportAck bool
	// If true, client doesn't want values so server shouldn't send them.
	KeysOnly bool
	// If true, client wants the server to send checkpoint events.
	Checkpoint bool
	// Optional identifier to use for this client, to allow reconnects
	ClientName string
	// Registers this client (by name) till explicitly deregistered.
	RegisteredClient bool
}
// Value for TapArguments.Backfill denoting that no past events at all
// should be sent.
const TapNoBackfill = math.MaxUint64
// DefaultTapArguments returns a default set of parameter values to | |||
// pass to StartTapFeed. | |||
func DefaultTapArguments() TapArguments { | |||
return TapArguments{ | |||
Backfill: TapNoBackfill, | |||
} | |||
} | |||
func (args *TapArguments) flags() []byte { | |||
var flags gomemcached.TapConnectFlag | |||
if args.Backfill != 0 { | |||
flags |= gomemcached.BACKFILL | |||
} | |||
if args.Dump { | |||
flags |= gomemcached.DUMP | |||
} | |||
if len(args.VBuckets) > 0 { | |||
flags |= gomemcached.LIST_VBUCKETS | |||
} | |||
if args.Takeover { | |||
flags |= gomemcached.TAKEOVER_VBUCKETS | |||
} | |||
if args.SupportAck { | |||
flags |= gomemcached.SUPPORT_ACK | |||
} | |||
if args.KeysOnly { | |||
flags |= gomemcached.REQUEST_KEYS_ONLY | |||
} | |||
if args.Checkpoint { | |||
flags |= gomemcached.CHECKPOINT | |||
} | |||
if args.RegisteredClient { | |||
flags |= gomemcached.REGISTERED_CLIENT | |||
} | |||
encoded := make([]byte, 4) | |||
binary.BigEndian.PutUint32(encoded, uint32(flags)) | |||
return encoded | |||
} | |||
// must panics on a non-nil error. It is used for binary.Write calls
// into an in-memory buffer, which cannot realistically fail.
func must(err error) {
	if err == nil {
		return
	}
	panic(err)
}
func (args *TapArguments) bytes() (rv []byte) { | |||
buf := bytes.NewBuffer([]byte{}) | |||
if args.Backfill > 0 { | |||
must(binary.Write(buf, binary.BigEndian, uint64(args.Backfill))) | |||
} | |||
if len(args.VBuckets) > 0 { | |||
must(binary.Write(buf, binary.BigEndian, uint16(len(args.VBuckets)))) | |||
for i := 0; i < len(args.VBuckets); i++ { | |||
must(binary.Write(buf, binary.BigEndian, uint16(args.VBuckets[i]))) | |||
} | |||
} | |||
return buf.Bytes() | |||
} | |||
// TapFeed represents a stream of events from a server.
type TapFeed struct {
	C      <-chan TapEvent // incoming events; closed when the feed ends
	Error  error           // set when the feed terminated abnormally
	closer chan bool       // closed by Close() to stop runFeed
}
// StartTapFeed starts a TAP feed on a client connection.
//
// The events can be read from the returned channel. The connection
// can no longer be used for other purposes; it's now reserved for
// receiving the TAP messages. To stop receiving events, close the
// client connection.
func (mc *Client) StartTapFeed(args TapArguments) (*TapFeed, error) {
	rq := &gomemcached.MCRequest{
		Opcode: gomemcached.TAP_CONNECT,
		Key:    []byte(args.ClientName),
		Extras: args.flags(),
		Body:   args.bytes()}
	err := mc.Transmit(rq)
	if err != nil {
		return nil, err
	}
	// Unbuffered channel: runFeed blocks until the consumer reads each
	// event or the feed is closed.
	ch := make(chan TapEvent)
	feed := &TapFeed{
		C:      ch,
		closer: make(chan bool),
	}
	go mc.runFeed(ch, feed)
	return feed, nil
}
// TapRecvHook is called after every incoming tap packet is received.
var TapRecvHook func(*gomemcached.MCRequest, int, error)

// Internal goroutine that reads from the socket and writes events to
// the channel. It exits — closing ch and the client connection — on a
// read error, a failed TAP_CONNECT, an end-of-stream marker, or when
// the feed's closer channel is closed.
func (mc *Client) runFeed(ch chan TapEvent, feed *TapFeed) {
	defer close(ch)
	var headerBuf [gomemcached.HDR_LEN]byte
loop:
	for {
		// Read the next request from the server.
		//
		// (Can't call mc.Receive() because it reads a
		// _response_ not a request.)
		var pkt gomemcached.MCRequest
		n, err := pkt.Receive(mc.conn, headerBuf[:])
		if TapRecvHook != nil {
			TapRecvHook(&pkt, n, err)
		}
		if err != nil {
			// EOF is a normal shutdown; anything else is surfaced
			// to the consumer via feed.Error.
			if err != io.EOF {
				feed.Error = err
			}
			break loop
		}
		//logging.Infof("** TapFeed received %#v : %q", pkt, pkt.Body)
		if pkt.Opcode == gomemcached.TAP_CONNECT {
			// This is not an event from the server; it's
			// an error response to my connect request.
			feed.Error = fmt.Errorf("tap connection failed: %s", pkt.Body)
			break loop
		}
		event := makeTapEvent(pkt)
		if event != nil {
			if event.Opcode == tapEndStream {
				break loop
			}
			// Deliver the event, unless the feed is being closed.
			select {
			case ch <- *event:
			case <-feed.closer:
				break loop
			}
		}
		if len(pkt.Extras) >= 4 {
			// TAP flags are the 2 bytes at extras offset 2; if the
			// server requested an ACK, answer before reading on.
			reqFlags := binary.BigEndian.Uint16(pkt.Extras[2:])
			if reqFlags&gomemcached.TAP_ACK != 0 {
				if _, err := mc.sendAck(&pkt); err != nil {
					feed.Error = err
					break loop
				}
			}
		}
	}
	if err := mc.Close(); err != nil {
		logging.Errorf("Error closing memcached client: %v", err)
	}
}
// sendAck replies to a server TAP packet that requested an ACK,
// echoing its opcode and opaque with a SUCCESS status. Returns the
// number of bytes written.
func (mc *Client) sendAck(pkt *gomemcached.MCRequest) (int, error) {
	res := gomemcached.MCResponse{
		Opcode: pkt.Opcode,
		Opaque: pkt.Opaque,
		Status: gomemcached.SUCCESS,
	}
	return res.Transmit(mc.conn)
}
// Close terminates a TapFeed.
//
// Call this if you stop using a TapFeed before its channel ends.
// Must be called at most once: it closes the internal closer channel,
// and closing an already-closed channel panics.
func (feed *TapFeed) Close() {
	close(feed.closer)
}
@ -0,0 +1,67 @@ | |||
package memcached | |||
import ( | |||
"errors" | |||
"io" | |||
"github.com/couchbase/gomemcached" | |||
) | |||
// errNoConn is returned when an operation is attempted with a nil reader/writer.
var errNoConn = errors.New("no connection")
// UnwrapMemcachedError converts memcached errors to normal responses.
//
// If the error is a memcached response, declare the error to be nil
// so a client can handle the status without worrying about whether it
// indicates success or failure.
//
// The comparison relies on getResponse returning the response itself
// as the error (MCResponse implements error).
func UnwrapMemcachedError(rv *gomemcached.MCResponse,
	err error) (*gomemcached.MCResponse, error) {
	if rv == err {
		return rv, nil
	}
	return rv, err
}
// ReceiveHook is called after every packet is received (or attempted to be)
var ReceiveHook func(*gomemcached.MCResponse, int, error)

// getResponse reads one response packet from s into a fresh
// MCResponse, using hdrBytes as scratch space for the fixed header.
// When the status is neither SUCCESS nor AUTH_CONTINUE the response
// itself is also returned as the error (MCResponse implements error),
// which callers can later strip with UnwrapMemcachedError.
func getResponse(s io.Reader, hdrBytes []byte) (rv *gomemcached.MCResponse, n int, err error) {
	if s == nil {
		return nil, 0, errNoConn
	}
	rv = &gomemcached.MCResponse{}
	n, err = rv.Receive(s, hdrBytes)
	if ReceiveHook != nil {
		ReceiveHook(rv, n, err)
	}
	if err == nil && (rv.Status != gomemcached.SUCCESS && rv.Status != gomemcached.AUTH_CONTINUE) {
		err = rv
	}
	return rv, n, err
}
// TransmitHook is called after each packet is transmitted.
var TransmitHook func(*gomemcached.MCRequest, int, error)

// transmitRequest writes req to o and invokes TransmitHook (if set)
// with the outcome. Returns the bytes written and any write error; a
// nil writer yields errNoConn.
func transmitRequest(o io.Writer, req *gomemcached.MCRequest) (int, error) {
	if o == nil {
		return 0, errNoConn
	}
	n, err := req.Transmit(o)
	if TransmitHook != nil {
		TransmitHook(req, n, err)
	}
	return n, err
}
func transmitResponse(o io.Writer, res *gomemcached.MCResponse) (int, error) { | |||
if o == nil { | |||
return 0, errNoConn | |||
} | |||
n, err := res.Transmit(o) | |||
return n, err | |||
} |
@ -0,0 +1,335 @@ | |||
// Package gomemcached is binary protocol packet formats and constants. | |||
package gomemcached | |||
import ( | |||
"fmt" | |||
) | |||
const (
	REQ_MAGIC = 0x80 // first byte of every request packet
	RES_MAGIC = 0x81 // first byte of every response packet
)

// CommandCode for memcached packets.
type CommandCode uint8

const (
	GET        = CommandCode(0x00)
	SET        = CommandCode(0x01)
	ADD        = CommandCode(0x02)
	REPLACE    = CommandCode(0x03)
	DELETE     = CommandCode(0x04)
	INCREMENT  = CommandCode(0x05)
	DECREMENT  = CommandCode(0x06)
	QUIT       = CommandCode(0x07)
	FLUSH      = CommandCode(0x08)
	GETQ       = CommandCode(0x09)
	NOOP       = CommandCode(0x0a)
	VERSION    = CommandCode(0x0b)
	GETK       = CommandCode(0x0c)
	GETKQ      = CommandCode(0x0d)
	APPEND     = CommandCode(0x0e)
	PREPEND    = CommandCode(0x0f)
	STAT       = CommandCode(0x10)
	SETQ       = CommandCode(0x11)
	ADDQ       = CommandCode(0x12)
	REPLACEQ   = CommandCode(0x13)
	DELETEQ    = CommandCode(0x14)
	INCREMENTQ = CommandCode(0x15)
	DECREMENTQ = CommandCode(0x16)
	QUITQ      = CommandCode(0x17)
	FLUSHQ     = CommandCode(0x18)
	APPENDQ    = CommandCode(0x19)
	AUDIT      = CommandCode(0x27)
	PREPENDQ   = CommandCode(0x1a)
	GAT        = CommandCode(0x1d)
	HELLO      = CommandCode(0x1f)
	RGET       = CommandCode(0x30)
	RSET       = CommandCode(0x31)
	RSETQ      = CommandCode(0x32)
	RAPPEND    = CommandCode(0x33)
	RAPPENDQ   = CommandCode(0x34)
	RPREPEND   = CommandCode(0x35)
	RPREPENDQ  = CommandCode(0x36)
	RDELETE    = CommandCode(0x37)
	RDELETEQ   = CommandCode(0x38)
	RINCR      = CommandCode(0x39)
	RINCRQ     = CommandCode(0x3a)
	RDECR      = CommandCode(0x3b)
	RDECRQ     = CommandCode(0x3c)

	SASL_LIST_MECHS = CommandCode(0x20)
	SASL_AUTH       = CommandCode(0x21)
	SASL_STEP       = CommandCode(0x22)

	SET_VBUCKET = CommandCode(0x3d)

	TAP_CONNECT          = CommandCode(0x40) // Client-sent request to initiate Tap feed
	TAP_MUTATION         = CommandCode(0x41) // Notification of a SET/ADD/REPLACE/etc. on the server
	TAP_DELETE           = CommandCode(0x42) // Notification of a DELETE on the server
	TAP_FLUSH            = CommandCode(0x43) // Replicates a flush_all command
	TAP_OPAQUE           = CommandCode(0x44) // Opaque control data from the engine
	TAP_VBUCKET_SET      = CommandCode(0x45) // Sets state of vbucket in receiver (used in takeover)
	TAP_CHECKPOINT_START = CommandCode(0x46) // Notifies start of new checkpoint
	TAP_CHECKPOINT_END   = CommandCode(0x47) // Notifies end of checkpoint

	UPR_OPEN        = CommandCode(0x50) // Open a UPR connection with a name
	UPR_ADDSTREAM   = CommandCode(0x51) // Sent by ebucketMigrator to UPR Consumer
	UPR_CLOSESTREAM = CommandCode(0x52) // Sent by eBucketMigrator to UPR Consumer
	UPR_FAILOVERLOG = CommandCode(0x54) // Request failover logs
	UPR_STREAMREQ   = CommandCode(0x53) // Stream request from consumer to producer
	UPR_STREAMEND   = CommandCode(0x55) // Sent by producer when it has no more messages to stream
	UPR_SNAPSHOT    = CommandCode(0x56) // Start of a new snapshot
	UPR_MUTATION    = CommandCode(0x57) // Key mutation
	UPR_DELETION    = CommandCode(0x58) // Key deletion
	UPR_EXPIRATION  = CommandCode(0x59) // Key expiration
	UPR_FLUSH       = CommandCode(0x5a) // Delete all the data for a vbucket
	UPR_NOOP        = CommandCode(0x5c) // UPR NOOP
	UPR_BUFFERACK   = CommandCode(0x5d) // UPR Buffer Acknowledgement
	UPR_CONTROL     = CommandCode(0x5e) // Set flow control params

	SELECT_BUCKET = CommandCode(0x89) // Select bucket

	OBSERVE_SEQNO = CommandCode(0x91) // Sequence Number based Observe
	OBSERVE       = CommandCode(0x92)

	GET_META            = CommandCode(0xA0) // Get meta. returns with expiry, flags, cas etc
	SUBDOC_GET          = CommandCode(0xc5) // Get subdoc. Returns with xattrs
	SUBDOC_MULTI_LOOKUP = CommandCode(0xd0) // Multi lookup. Doc xattrs and meta.
)
// BufferedCommandCodeMap lists command codes that are counted toward
// the DCP control buffer. When DCP clients receive DCP messages with
// these command codes, they need to provide acknowledgement.
var BufferedCommandCodeMap = map[CommandCode]bool{
	SET_VBUCKET:    true,
	UPR_STREAMEND:  true,
	UPR_SNAPSHOT:   true,
	UPR_MUTATION:   true,
	UPR_DELETION:   true,
	UPR_EXPIRATION: true}
// Status field for memcached response.
type Status uint16

// Matches with protocol_binary.h as source of truth
const (
	SUCCESS         = Status(0x00)
	KEY_ENOENT      = Status(0x01)
	KEY_EEXISTS     = Status(0x02)
	E2BIG           = Status(0x03)
	EINVAL          = Status(0x04)
	NOT_STORED      = Status(0x05)
	DELTA_BADVAL    = Status(0x06)
	NOT_MY_VBUCKET  = Status(0x07)
	NO_BUCKET       = Status(0x08)
	LOCKED          = Status(0x09)
	AUTH_STALE      = Status(0x1f)
	AUTH_ERROR      = Status(0x20)
	AUTH_CONTINUE   = Status(0x21)
	ERANGE          = Status(0x22)
	ROLLBACK        = Status(0x23)
	EACCESS         = Status(0x24)
	NOT_INITIALIZED = Status(0x25)
	UNKNOWN_COMMAND = Status(0x81)
	ENOMEM          = Status(0x82)
	NOT_SUPPORTED   = Status(0x83)
	EINTERNAL       = Status(0x84)
	EBUSY           = Status(0x85)
	TMPFAIL         = Status(0x86)

	// SUBDOC
	SUBDOC_PATH_NOT_FOUND             = Status(0xc0)
	SUBDOC_BAD_MULTI                  = Status(0xcc)
	SUBDOC_MULTI_PATH_FAILURE_DELETED = Status(0xd3)
)
// Tags wrapping user data in log output, for log redaction.
const (
	UdTagBegin = "<ud>"
	UdTagEnd   = "</ud>"
)
// isFatal marks response statuses treated as fatal conditions
// (consulted elsewhere in the package; callers presumably hang up on
// these rather than retry).
var isFatal = map[Status]bool{
	DELTA_BADVAL:  true,
	NO_BUCKET:     true,
	AUTH_STALE:    true,
	AUTH_ERROR:    true,
	ERANGE:        true,
	ROLLBACK:      true,
	EACCESS:       true,
	ENOMEM:        true,
	NOT_SUPPORTED: true,
}
// NOTE(review): these are declared as vars rather than consts, so they
// are mutable by importers — confirm whether that is intentional.

// the producer/consumer bit in dcp flags
var DCP_PRODUCER uint32 = 0x01

// the include XATTRS bit in dcp flags
var DCP_OPEN_INCLUDE_XATTRS uint32 = 0x04

// the include deletion time bit in dcp flags
var DCP_OPEN_INCLUDE_DELETE_TIMES uint32 = 0x20

// Datatype to Include XATTRS in SUBDOC GET
var SUBDOC_FLAG_XATTR uint8 = 0x04
// MCItem is an internal representation of an item.
type MCItem struct {
	Cas               uint64 // CAS identifier of the stored item
	Flags, Expiration uint32 // item flags and expiration time
	Data              []byte // item payload
}

// Number of bytes in a binary protocol header.
const HDR_LEN = 24
// Mapping of CommandCode -> name of command (not exhaustive) | |||
var CommandNames map[CommandCode]string | |||
// StatusNames human readable names for memcached response. | |||
var StatusNames map[Status]string | |||
func init() { | |||
CommandNames = make(map[CommandCode]string) | |||
CommandNames[GET] = "GET" | |||
CommandNames[SET] = "SET" | |||
CommandNames[ADD] = "ADD" | |||
CommandNames[REPLACE] = "REPLACE" | |||
CommandNames[DELETE] = "DELETE" | |||
CommandNames[INCREMENT] = "INCREMENT" | |||
CommandNames[DECREMENT] = "DECREMENT" | |||
CommandNames[QUIT] = "QUIT" | |||
CommandNames[FLUSH] = "FLUSH" | |||
CommandNames[GETQ] = "GETQ" | |||
CommandNames[NOOP] = "NOOP" | |||
CommandNames[VERSION] = "VERSION" | |||
CommandNames[GETK] = "GETK" | |||
CommandNames[GETKQ] = "GETKQ" | |||
CommandNames[APPEND] = "APPEND" | |||
CommandNames[PREPEND] = "PREPEND" | |||
CommandNames[STAT] = "STAT" | |||
CommandNames[SETQ] = "SETQ" | |||
CommandNames[ADDQ] = "ADDQ" | |||
CommandNames[REPLACEQ] = "REPLACEQ" | |||
CommandNames[DELETEQ] = "DELETEQ" | |||
CommandNames[INCREMENTQ] = "INCREMENTQ" | |||
CommandNames[DECREMENTQ] = "DECREMENTQ" | |||
CommandNames[QUITQ] = "QUITQ" | |||
CommandNames[FLUSHQ] = "FLUSHQ" | |||
CommandNames[APPENDQ] = "APPENDQ" | |||
CommandNames[PREPENDQ] = "PREPENDQ" | |||
CommandNames[RGET] = "RGET" | |||
CommandNames[RSET] = "RSET" | |||
CommandNames[RSETQ] = "RSETQ" | |||
CommandNames[RAPPEND] = "RAPPEND" | |||
CommandNames[RAPPENDQ] = "RAPPENDQ" | |||
CommandNames[RPREPEND] = "RPREPEND" | |||
CommandNames[RPREPENDQ] = "RPREPENDQ" | |||
CommandNames[RDELETE] = "RDELETE" | |||
CommandNames[RDELETEQ] = "RDELETEQ" | |||
CommandNames[RINCR] = "RINCR" | |||
CommandNames[RINCRQ] = "RINCRQ" | |||
CommandNames[RDECR] = "RDECR" | |||
CommandNames[RDECRQ] = "RDECRQ" | |||
CommandNames[SASL_LIST_MECHS] = "SASL_LIST_MECHS" | |||
CommandNames[SASL_AUTH] = "SASL_AUTH" | |||
CommandNames[SASL_STEP] = "SASL_STEP" | |||
CommandNames[TAP_CONNECT] = "TAP_CONNECT" | |||
CommandNames[TAP_MUTATION] = "TAP_MUTATION" | |||
CommandNames[TAP_DELETE] = "TAP_DELETE" | |||
CommandNames[TAP_FLUSH] = "TAP_FLUSH" | |||
CommandNames[TAP_OPAQUE] = "TAP_OPAQUE" | |||
CommandNames[TAP_VBUCKET_SET] = "TAP_VBUCKET_SET" | |||
CommandNames[TAP_CHECKPOINT_START] = "TAP_CHECKPOINT_START" | |||
CommandNames[TAP_CHECKPOINT_END] = "TAP_CHECKPOINT_END" | |||
CommandNames[UPR_OPEN] = "UPR_OPEN" | |||
CommandNames[UPR_ADDSTREAM] = "UPR_ADDSTREAM" | |||
CommandNames[UPR_CLOSESTREAM] = "UPR_CLOSESTREAM" | |||
CommandNames[UPR_FAILOVERLOG] = "UPR_FAILOVERLOG" | |||
CommandNames[UPR_STREAMREQ] = "UPR_STREAMREQ" | |||
CommandNames[UPR_STREAMEND] = "UPR_STREAMEND" | |||
CommandNames[UPR_SNAPSHOT] = "UPR_SNAPSHOT" | |||
CommandNames[UPR_MUTATION] = "UPR_MUTATION" | |||
CommandNames[UPR_DELETION] = "UPR_DELETION" | |||
CommandNames[UPR_EXPIRATION] = "UPR_EXPIRATION" | |||
CommandNames[UPR_FLUSH] = "UPR_FLUSH" | |||
CommandNames[UPR_NOOP] = "UPR_NOOP" | |||
CommandNames[UPR_BUFFERACK] = "UPR_BUFFERACK" | |||
CommandNames[UPR_CONTROL] = "UPR_CONTROL" | |||
CommandNames[SUBDOC_GET] = "SUBDOC_GET" | |||
CommandNames[SUBDOC_MULTI_LOOKUP] = "SUBDOC_MULTI_LOOKUP" | |||
StatusNames = make(map[Status]string) | |||
StatusNames[SUCCESS] = "SUCCESS" | |||
StatusNames[KEY_ENOENT] = "KEY_ENOENT" | |||
StatusNames[KEY_EEXISTS] = "KEY_EEXISTS" | |||
StatusNames[E2BIG] = "E2BIG" | |||
StatusNames[EINVAL] = "EINVAL" | |||
StatusNames[NOT_STORED] = "NOT_STORED" | |||
StatusNames[DELTA_BADVAL] = "DELTA_BADVAL" | |||
StatusNames[NOT_MY_VBUCKET] = "NOT_MY_VBUCKET" | |||
StatusNames[NO_BUCKET] = "NO_BUCKET" | |||
StatusNames[AUTH_STALE] = "AUTH_STALE" | |||
StatusNames[AUTH_ERROR] = "AUTH_ERROR" | |||
StatusNames[AUTH_CONTINUE] = "AUTH_CONTINUE" | |||
StatusNames[ERANGE] = "ERANGE" | |||
StatusNames[ROLLBACK] = "ROLLBACK" | |||
StatusNames[EACCESS] = "EACCESS" | |||
StatusNames[NOT_INITIALIZED] = "NOT_INITIALIZED" | |||
StatusNames[UNKNOWN_COMMAND] = "UNKNOWN_COMMAND" | |||
StatusNames[ENOMEM] = "ENOMEM" | |||
StatusNames[NOT_SUPPORTED] = "NOT_SUPPORTED" | |||
StatusNames[EINTERNAL] = "EINTERNAL" | |||
StatusNames[EBUSY] = "EBUSY" | |||
StatusNames[TMPFAIL] = "TMPFAIL" | |||
StatusNames[SUBDOC_PATH_NOT_FOUND] = "SUBDOC_PATH_NOT_FOUND" | |||
StatusNames[SUBDOC_BAD_MULTI] = "SUBDOC_BAD_MULTI" | |||
} | |||
// String an op code. | |||
func (o CommandCode) String() (rv string) { | |||
rv = CommandNames[o] | |||
if rv == "" { | |||
rv = fmt.Sprintf("0x%02x", int(o)) | |||
} | |||
return rv | |||
} | |||
// String an op code. | |||
func (s Status) String() (rv string) { | |||
rv = StatusNames[s] | |||
if rv == "" { | |||
rv = fmt.Sprintf("0x%02x", int(s)) | |||
} | |||
return rv | |||
} | |||
// IsQuiet will return true if a command is a "quiet" command. | |||
func (o CommandCode) IsQuiet() bool { | |||
switch o { | |||
case GETQ, | |||
GETKQ, | |||
SETQ, | |||
ADDQ, | |||
REPLACEQ, | |||
DELETEQ, | |||
INCREMENTQ, | |||
DECREMENTQ, | |||
QUITQ, | |||
FLUSHQ, | |||
APPENDQ, | |||
PREPENDQ, | |||
RSETQ, | |||
RAPPENDQ, | |||
RPREPENDQ, | |||
RDELETEQ, | |||
RINCRQ, | |||
RDECRQ: | |||
return true | |||
} | |||
return false | |||
} |
@ -0,0 +1,197 @@ | |||
package gomemcached | |||
import ( | |||
"encoding/binary" | |||
"fmt" | |||
"io" | |||
) | |||
// MaxBodyLen is the maximum reasonable body length to expect.
// Anything larger than this will result in an error.
// The current limit, 20MB, is the size limit supported by ep-engine.
var MaxBodyLen = int(20 * 1024 * 1024)
// MCRequest is memcached Request
type MCRequest struct {
	// The command being issued
	Opcode CommandCode
	// The CAS (if applicable, or 0)
	Cas uint64
	// An opaque value to be returned with this request
	Opaque uint32
	// The vbucket to which this command belongs
	VBucket uint16
	// Command extras, key, and body, plus extended metadata
	// (ExtMeta trails the body on the wire; see Bytes/Receive)
	Extras, Key, Body, ExtMeta []byte
	// Datatype identifier
	DataType uint8
}
// Size gives the number of bytes this request requires. | |||
func (req *MCRequest) Size() int { | |||
return HDR_LEN + len(req.Extras) + len(req.Key) + len(req.Body) + len(req.ExtMeta) | |||
} | |||
// String renders a short debugging representation of this request.
func (req MCRequest) String() string {
	return fmt.Sprintf("{MCRequest opcode=%s, bodylen=%d, key='%s'}",
		req.Opcode, len(req.Body), req.Key)
}
// fillHeaderBytes serializes the 24-byte request header followed by
// the extras and key into data, returning the number of bytes
// written. data must be at least HDR_LEN+len(Extras)+len(Key) long.
//
// NOTE(review): the DataType and Cas slots are written only when
// non-zero, so data is assumed to be zero-filled (as from make);
// reusing a dirty buffer would leak stale bytes into those fields.
func (req *MCRequest) fillHeaderBytes(data []byte) int {
	pos := 0
	data[pos] = REQ_MAGIC
	pos++
	data[pos] = byte(req.Opcode)
	pos++
	binary.BigEndian.PutUint16(data[pos:pos+2],
		uint16(len(req.Key)))
	pos += 2
	// 4: extras length
	data[pos] = byte(len(req.Extras))
	pos++
	// Data type
	if req.DataType != 0 {
		data[pos] = byte(req.DataType)
	}
	pos++
	binary.BigEndian.PutUint16(data[pos:pos+2], req.VBucket)
	pos += 2
	// 8: total body length counts extras + key + body + ext meta
	binary.BigEndian.PutUint32(data[pos:pos+4],
		uint32(len(req.Body)+len(req.Key)+len(req.Extras)+len(req.ExtMeta)))
	pos += 4
	// 12: opaque
	binary.BigEndian.PutUint32(data[pos:pos+4], req.Opaque)
	pos += 4
	// 16: CAS
	if req.Cas != 0 {
		binary.BigEndian.PutUint64(data[pos:pos+8], req.Cas)
	}
	pos += 8
	if len(req.Extras) > 0 {
		copy(data[pos:pos+len(req.Extras)], req.Extras)
		pos += len(req.Extras)
	}
	if len(req.Key) > 0 {
		copy(data[pos:pos+len(req.Key)], req.Key)
		pos += len(req.Key)
	}
	return pos
}
// HeaderBytes will return the wire representation of the request header
// (with the extras and key), but without the body or extended metadata.
func (req *MCRequest) HeaderBytes() []byte {
	data := make([]byte, HDR_LEN+len(req.Extras)+len(req.Key))
	req.fillHeaderBytes(data)
	return data
}
// Bytes will return the wire representation of this request. | |||
func (req *MCRequest) Bytes() []byte { | |||
data := make([]byte, req.Size()) | |||
pos := req.fillHeaderBytes(data) | |||
if len(req.Body) > 0 { | |||
copy(data[pos:pos+len(req.Body)], req.Body) | |||
} | |||
if len(req.ExtMeta) > 0 { | |||
copy(data[pos+len(req.Body):pos+len(req.Body)+len(req.ExtMeta)], req.ExtMeta) | |||
} | |||
return data | |||
} | |||
// Transmit will send this request message across a writer. | |||
func (req *MCRequest) Transmit(w io.Writer) (n int, err error) { | |||
if len(req.Body) < 128 { | |||
n, err = w.Write(req.Bytes()) | |||
} else { | |||
n, err = w.Write(req.HeaderBytes()) | |||
if err == nil { | |||
m := 0 | |||
m, err = w.Write(req.Body) | |||
n += m | |||
} | |||
} | |||
return | |||
} | |||
// Receive will fill this MCRequest with the data from a reader.
//
// hdrBytes is a caller-supplied scratch buffer for the fixed-size
// header; if it is shorter than HDR_LEN a fresh buffer is used
// instead. Returns the total number of bytes consumed from r.
func (req *MCRequest) Receive(r io.Reader, hdrBytes []byte) (int, error) {
	if len(hdrBytes) < HDR_LEN {
		hdrBytes = []byte{
			0, 0, 0, 0, 0, 0, 0, 0,
			0, 0, 0, 0, 0, 0, 0, 0,
			0, 0, 0, 0, 0, 0, 0, 0}
	}
	n, err := io.ReadFull(r, hdrBytes)
	if err != nil {
		return n, err
	}
	// Either magic is accepted here — TAP/DCP servers send request
	// packets down the same connection clients read responses from.
	if hdrBytes[0] != RES_MAGIC && hdrBytes[0] != REQ_MAGIC {
		return n, fmt.Errorf("bad magic: 0x%02x", hdrBytes[0])
	}
	klen := int(binary.BigEndian.Uint16(hdrBytes[2:]))
	elen := int(hdrBytes[4])
	// Data type at 5
	req.DataType = uint8(hdrBytes[5])
	req.Opcode = CommandCode(hdrBytes[1])
	// Vbucket at 6:7
	req.VBucket = binary.BigEndian.Uint16(hdrBytes[6:])
	totalBodyLen := int(binary.BigEndian.Uint32(hdrBytes[8:]))
	req.Opaque = binary.BigEndian.Uint32(hdrBytes[12:])
	req.Cas = binary.BigEndian.Uint64(hdrBytes[16:])
	if totalBodyLen > 0 {
		// NOTE(review): this allocates before bodyLen is validated
		// against MaxBodyLen below, so a hostile length field can
		// still force a large allocation — confirm this is intended.
		buf := make([]byte, totalBodyLen)
		m, err := io.ReadFull(r, buf)
		n += m
		if err == nil {
			if req.Opcode >= TAP_MUTATION &&
				req.Opcode <= TAP_CHECKPOINT_END &&
				len(buf) > 1 {
				// In these commands there is "engine private"
				// data at the end of the extras. The first 2
				// bytes of extra data give its length.
				elen += int(binary.BigEndian.Uint16(buf))
			}
			req.Extras = buf[0:elen]
			req.Key = buf[elen : klen+elen]
			// get the length of extended metadata (a uint16 at
			// extras offset 28, present only when elen > 29)
			extMetaLen := 0
			if elen > 29 {
				extMetaLen = int(binary.BigEndian.Uint16(req.Extras[28:30]))
			}
			bodyLen := totalBodyLen - klen - elen - extMetaLen
			if bodyLen > MaxBodyLen {
				return n, fmt.Errorf("%d is too big (max %d)",
					bodyLen, MaxBodyLen)
			}
			req.Body = buf[klen+elen : klen+elen+bodyLen]
			req.ExtMeta = buf[klen+elen+bodyLen:]
		}
	}
	return n, err
}
@ -0,0 +1,267 @@ | |||
package gomemcached | |||
import ( | |||
"encoding/binary" | |||
"fmt" | |||
"io" | |||
"sync" | |||
) | |||
// MCResponse is a memcached binary-protocol response message.
type MCResponse struct {
	// The command opcode of the command that sent the request
	Opcode CommandCode
	// The status of the response
	Status Status
	// The opaque sent in the request
	Opaque uint32
	// The CAS identifier (if applicable)
	Cas uint64
	// Extras, key, and body for this response
	Extras, Key, Body []byte
	// If true, this represents a fatal condition and we should hang up
	Fatal bool
	// Datatype identifier (wire header byte 5)
	DataType uint8
}
// A debugging string representation of this response | |||
func (res MCResponse) String() string { | |||
return fmt.Sprintf("{MCResponse status=%v keylen=%d, extralen=%d, bodylen=%d}", | |||
res.Status, len(res.Key), len(res.Extras), len(res.Body)) | |||
} | |||
// Response as an error. | |||
func (res *MCResponse) Error() string { | |||
return fmt.Sprintf("MCResponse status=%v, opcode=%v, opaque=%v, msg: %s", | |||
res.Status, res.Opcode, res.Opaque, string(res.Body)) | |||
} | |||
func errStatus(e error) Status { | |||
status := Status(0xffff) | |||
if res, ok := e.(*MCResponse); ok { | |||
status = res.Status | |||
} | |||
return status | |||
} | |||
// IsNotFound is true if this error represents a "not found" response. | |||
func IsNotFound(e error) bool { | |||
return errStatus(e) == KEY_ENOENT | |||
} | |||
// IsFatal is false if this error isn't believed to be fatal to a connection. | |||
func IsFatal(e error) bool { | |||
if e == nil { | |||
return false | |||
} | |||
_, ok := isFatal[errStatus(e)] | |||
if ok { | |||
return true | |||
} | |||
return false | |||
} | |||
// Size is number of bytes this response consumes on the wire. | |||
func (res *MCResponse) Size() int { | |||
return HDR_LEN + len(res.Extras) + len(res.Key) + len(res.Body) | |||
} | |||
func (res *MCResponse) fillHeaderBytes(data []byte) int { | |||
pos := 0 | |||
data[pos] = RES_MAGIC | |||
pos++ | |||
data[pos] = byte(res.Opcode) | |||
pos++ | |||
binary.BigEndian.PutUint16(data[pos:pos+2], | |||
uint16(len(res.Key))) | |||
pos += 2 | |||
// 4 | |||
data[pos] = byte(len(res.Extras)) | |||
pos++ | |||
// Data type | |||
if res.DataType != 0 { | |||
data[pos] = byte(res.DataType) | |||
} else { | |||
data[pos] = 0 | |||
} | |||
pos++ | |||
binary.BigEndian.PutUint16(data[pos:pos+2], uint16(res.Status)) | |||
pos += 2 | |||
// 8 | |||
binary.BigEndian.PutUint32(data[pos:pos+4], | |||
uint32(len(res.Body)+len(res.Key)+len(res.Extras))) | |||
pos += 4 | |||
// 12 | |||
binary.BigEndian.PutUint32(data[pos:pos+4], res.Opaque) | |||
pos += 4 | |||
// 16 | |||
binary.BigEndian.PutUint64(data[pos:pos+8], res.Cas) | |||
pos += 8 | |||
if len(res.Extras) > 0 { | |||
copy(data[pos:pos+len(res.Extras)], res.Extras) | |||
pos += len(res.Extras) | |||
} | |||
if len(res.Key) > 0 { | |||
copy(data[pos:pos+len(res.Key)], res.Key) | |||
pos += len(res.Key) | |||
} | |||
return pos | |||
} | |||
// HeaderBytes will get just the header bytes for this response. | |||
func (res *MCResponse) HeaderBytes() []byte { | |||
data := make([]byte, HDR_LEN+len(res.Extras)+len(res.Key)) | |||
res.fillHeaderBytes(data) | |||
return data | |||
} | |||
// Bytes will return the actual bytes transmitted for this response. | |||
func (res *MCResponse) Bytes() []byte { | |||
data := make([]byte, res.Size()) | |||
pos := res.fillHeaderBytes(data) | |||
copy(data[pos:pos+len(res.Body)], res.Body) | |||
return data | |||
} | |||
// Transmit will send this response message across a writer. | |||
func (res *MCResponse) Transmit(w io.Writer) (n int, err error) { | |||
if len(res.Body) < 128 { | |||
n, err = w.Write(res.Bytes()) | |||
} else { | |||
n, err = w.Write(res.HeaderBytes()) | |||
if err == nil { | |||
m := 0 | |||
m, err = w.Write(res.Body) | |||
m += n | |||
} | |||
} | |||
return | |||
} | |||
// Receive will fill this MCResponse with the data from this reader.
// hdrBytes may be supplied as a scratch buffer for the fixed-size
// header; a fresh 24-byte buffer is allocated when it is too small.
// Returns the total number of bytes consumed from r.
func (res *MCResponse) Receive(r io.Reader, hdrBytes []byte) (n int, err error) {
	if len(hdrBytes) < HDR_LEN {
		// 24 zero bytes: the fixed binary-protocol header size.
		hdrBytes = []byte{
			0, 0, 0, 0, 0, 0, 0, 0,
			0, 0, 0, 0, 0, 0, 0, 0,
			0, 0, 0, 0, 0, 0, 0, 0}
	}
	n, err = io.ReadFull(r, hdrBytes)
	if err != nil {
		return n, err
	}
	// Accept either magic byte so frames from both directions parse.
	if hdrBytes[0] != RES_MAGIC && hdrBytes[0] != REQ_MAGIC {
		return n, fmt.Errorf("bad magic: 0x%02x", hdrBytes[0])
	}
	klen := int(binary.BigEndian.Uint16(hdrBytes[2:4]))
	elen := int(hdrBytes[4])
	res.Opcode = CommandCode(hdrBytes[1])
	res.DataType = uint8(hdrBytes[5])
	res.Status = Status(binary.BigEndian.Uint16(hdrBytes[6:8]))
	res.Opaque = binary.BigEndian.Uint32(hdrBytes[12:16])
	res.Cas = binary.BigEndian.Uint64(hdrBytes[16:24])
	// The header's total body length covers extras + key + body.
	bodyLen := int(binary.BigEndian.Uint32(hdrBytes[8:12])) - (klen + elen)
	//defer function to debug the panic seen with MB-15557
	// (it also converts the panic into an error through the named
	// return value; a negative bodyLen would panic in make below)
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf(`Panic in Receive. Response %v \n
			key len %v extra len %v bodylen %v`, res, klen, elen, bodyLen)
		}
	}()
	buf := make([]byte, klen+elen+bodyLen)
	m, err := io.ReadFull(r, buf)
	if err == nil {
		// Extras, Key and Body are sub-slices aliasing buf.
		res.Extras = buf[0:elen]
		res.Key = buf[elen : klen+elen]
		res.Body = buf[klen+elen:]
	}
	return n + m, err
}
// MCResponsePool recycles *MCResponse values through a sync.Pool to
// reduce allocation churn on hot response paths.
type MCResponsePool struct {
	pool *sync.Pool
}
func NewMCResponsePool() *MCResponsePool { | |||
rv := &MCResponsePool{ | |||
pool: &sync.Pool{ | |||
New: func() interface{} { | |||
return &MCResponse{} | |||
}, | |||
}, | |||
} | |||
return rv | |||
} | |||
func (this *MCResponsePool) Get() *MCResponse { | |||
return this.pool.Get().(*MCResponse) | |||
} | |||
func (this *MCResponsePool) Put(r *MCResponse) { | |||
if r == nil { | |||
return | |||
} | |||
r.Extras = nil | |||
r.Key = nil | |||
r.Body = nil | |||
r.Fatal = false | |||
this.pool.Put(r) | |||
} | |||
// StringMCResponsePool recycles map[string]*MCResponse values of a
// nominal size through a sync.Pool.
type StringMCResponsePool struct {
	pool *sync.Pool
	// size is the initial capacity of new maps and bounds which maps
	// are accepted back on Put (see Put).
	size int
}
func NewStringMCResponsePool(size int) *StringMCResponsePool { | |||
rv := &StringMCResponsePool{ | |||
pool: &sync.Pool{ | |||
New: func() interface{} { | |||
return make(map[string]*MCResponse, size) | |||
}, | |||
}, | |||
size: size, | |||
} | |||
return rv | |||
} | |||
func (this *StringMCResponsePool) Get() map[string]*MCResponse { | |||
return this.pool.Get().(map[string]*MCResponse) | |||
} | |||
func (this *StringMCResponsePool) Put(m map[string]*MCResponse) { | |||
if m == nil || len(m) > 2*this.size { | |||
return | |||
} | |||
for k := range m { | |||
m[k] = nil | |||
delete(m, k) | |||
} | |||
this.pool.Put(m) | |||
} |
@ -0,0 +1,168 @@ | |||
package gomemcached | |||
import ( | |||
"bytes" | |||
"encoding/binary" | |||
"fmt" | |||
"io" | |||
"io/ioutil" | |||
"strings" | |||
) | |||
// TapConnectFlag is a bitmask of TAP connect options carried in the
// extras of a TAP connect request.
type TapConnectFlag uint32

// Tap connect option flags. Flags with a payload in the request body
// have a parser registered in TapFlagParsers (BACKFILL carries a
// uint64, LIST_VBUCKETS a vBucket list); the rest are presence-only.
const (
	BACKFILL          = TapConnectFlag(0x01)
	DUMP              = TapConnectFlag(0x02)
	LIST_VBUCKETS     = TapConnectFlag(0x04)
	TAKEOVER_VBUCKETS = TapConnectFlag(0x08)
	SUPPORT_ACK       = TapConnectFlag(0x10)
	REQUEST_KEYS_ONLY = TapConnectFlag(0x20)
	CHECKPOINT        = TapConnectFlag(0x40)
	REGISTERED_CLIENT = TapConnectFlag(0x80)
	FIX_FLAG_BYTEORDER = TapConnectFlag(0x100)
)
// Tap opaque event subtypes carried in TAP_OPAQUE messages.
const (
	TAP_OPAQUE_ENABLE_AUTO_NACK       = 0
	TAP_OPAQUE_INITIAL_VBUCKET_STREAM = 1
	TAP_OPAQUE_ENABLE_CHECKPOINT_SYNC = 2
	TAP_OPAQUE_CLOSE_TAP_STREAM       = 7
	TAP_OPAQUE_CLOSE_BACKFILL         = 8
)
// Tap item flags (bitmask set on individual TAP items).
const (
	TAP_ACK                     = 1
	TAP_NO_VALUE                = 2
	TAP_FLAG_NETWORK_BYTE_ORDER = 4
)
// TapConnectFlagNames maps each TapConnectFlag bit to its display name
// (used by TapConnectFlag.String).
var TapConnectFlagNames = map[TapConnectFlag]string{
	BACKFILL:           "BACKFILL",
	DUMP:               "DUMP",
	LIST_VBUCKETS:      "LIST_VBUCKETS",
	TAKEOVER_VBUCKETS:  "TAKEOVER_VBUCKETS",
	SUPPORT_ACK:        "SUPPORT_ACK",
	REQUEST_KEYS_ONLY:  "REQUEST_KEYS_ONLY",
	CHECKPOINT:         "CHECKPOINT",
	REGISTERED_CLIENT:  "REGISTERED_CLIENT",
	FIX_FLAG_BYTEORDER: "FIX_FLAG_BYTEORDER",
}
// TapItemParser is a function to parse a single tap extra: it consumes
// one flag's payload from the reader and returns the parsed value.
type TapItemParser func(io.Reader) (interface{}, error)
// TapParseUint64 is a function to parse a single tap uint64. | |||
func TapParseUint64(r io.Reader) (interface{}, error) { | |||
var rv uint64 | |||
err := binary.Read(r, binary.BigEndian, &rv) | |||
return rv, err | |||
} | |||
// TapParseUint16 is a function to parse a single tap uint16. | |||
func TapParseUint16(r io.Reader) (interface{}, error) { | |||
var rv uint16 | |||
err := binary.Read(r, binary.BigEndian, &rv) | |||
return rv, err | |||
} | |||
// TapParseBool is a function to parse a single tap boolean. Boolean
// flags carry no payload — the presence of the flag itself means true —
// so the reader is deliberately never consumed.
func TapParseBool(r io.Reader) (interface{}, error) {
	return true, nil
}
// TapParseVBList parses a list of vBucket numbers as []uint16. | |||
func TapParseVBList(r io.Reader) (interface{}, error) { | |||
num, err := TapParseUint16(r) | |||
if err != nil { | |||
return nil, err | |||
} | |||
n := int(num.(uint16)) | |||
rv := make([]uint16, n) | |||
for i := 0; i < n; i++ { | |||
x, err := TapParseUint16(r) | |||
if err != nil { | |||
return nil, err | |||
} | |||
rv[i] = x.(uint16) | |||
} | |||
return rv, err | |||
} | |||
// TapFlagParsers maps each TAP connect flag to the parser for its
// body payload. Flags without an entry carry no payload and fall back
// to TapParseBool (see ParseTapCommands).
var TapFlagParsers = map[TapConnectFlag]TapItemParser{
	BACKFILL:      TapParseUint64,
	LIST_VBUCKETS: TapParseVBList,
}
// SplitFlags will split the ORed flags into the individual bit flags. | |||
func (f TapConnectFlag) SplitFlags() []TapConnectFlag { | |||
rv := []TapConnectFlag{} | |||
for i := uint32(1); f != 0; i = i << 1 { | |||
if uint32(f)&i == i { | |||
rv = append(rv, TapConnectFlag(i)) | |||
} | |||
f = TapConnectFlag(uint32(f) & (^i)) | |||
} | |||
return rv | |||
} | |||
func (f TapConnectFlag) String() string { | |||
parts := []string{} | |||
for _, x := range f.SplitFlags() { | |||
p := TapConnectFlagNames[x] | |||
if p == "" { | |||
p = fmt.Sprintf("0x%x", int(x)) | |||
} | |||
parts = append(parts, p) | |||
} | |||
return strings.Join(parts, "|") | |||
} | |||
// TapConnect holds the parsed contents of a TAP connect request.
type TapConnect struct {
	// Flags maps each set connect flag to its parsed payload (true
	// for payload-less flags).
	Flags map[TapConnectFlag]interface{}
	// RemainingBody is whatever body bytes the flag parsers did not consume.
	RemainingBody []byte
	// Name is the connection name, taken from the request key.
	Name string
}
// ParseTapCommands parse the tap request into the interesting bits we may
// need to do something with: the connect flags with their parsed body
// payloads, the connection name from the key, and any unconsumed body.
func (req *MCRequest) ParseTapCommands() (TapConnect, error) {
	rv := TapConnect{
		Flags: map[TapConnectFlag]interface{}{},
		Name:  string(req.Key),
	}
	// The flag word occupies the first 4 bytes of the extras.
	if len(req.Extras) < 4 {
		return rv, fmt.Errorf("not enough extra bytes: %x", req.Extras)
	}
	flags := TapConnectFlag(binary.BigEndian.Uint32(req.Extras))
	r := bytes.NewReader(req.Body)
	// Each set flag consumes its payload from the body, in bit order.
	for _, f := range flags.SplitFlags() {
		fun := TapFlagParsers[f]
		if fun == nil {
			// Flags with no registered parser carry no payload.
			fun = TapParseBool
		}
		val, err := fun(r)
		if err != nil {
			return rv, err
		}
		rv.Flags[f] = val
	}
	// Whatever the parsers did not consume.
	var err error
	rv.RemainingBody, err = ioutil.ReadAll(r)
	return rv, err
}
@ -0,0 +1,47 @@ | |||
COUCHBASE INC. COMMUNITY EDITION LICENSE AGREEMENT | |||
IMPORTANT-READ CAREFULLY: BY CLICKING THE "I ACCEPT" BOX OR INSTALLING, | |||
DOWNLOADING OR OTHERWISE USING THIS SOFTWARE AND ANY ASSOCIATED | |||
DOCUMENTATION, YOU, ON BEHALF OF YOURSELF OR AS AN AUTHORIZED | |||
REPRESENTATIVE ON BEHALF OF AN ENTITY ("LICENSEE") AGREE TO ALL THE | |||
TERMS OF THIS COMMUNITY EDITION LICENSE AGREEMENT (THE "AGREEMENT") | |||
REGARDING YOUR USE OF THE SOFTWARE. YOU REPRESENT AND WARRANT THAT YOU | |||
HAVE FULL LEGAL AUTHORITY TO BIND THE LICENSEE TO THIS AGREEMENT. IF YOU | |||
DO NOT AGREE WITH ALL OF THESE TERMS, DO NOT SELECT THE "I ACCEPT" BOX | |||
AND DO NOT INSTALL, DOWNLOAD OR OTHERWISE USE THE SOFTWARE. THE | |||
EFFECTIVE DATE OF THIS AGREEMENT IS THE DATE ON WHICH YOU CLICK "I | |||
ACCEPT" OR OTHERWISE INSTALL, DOWNLOAD OR USE THE SOFTWARE. | |||
1. License Grant. Couchbase Inc. hereby grants Licensee, free of charge, | |||
the non-exclusive right to use, copy, merge, publish, distribute, | |||
sublicense, and/or sell copies of the Software, and to permit persons to | |||
whom the Software is furnished to do so, subject to Licensee including | |||
the following copyright notice in all copies or substantial portions of | |||
the Software: | |||
Couchbase (r) http://www.Couchbase.com Copyright 2016 Couchbase, Inc. | |||
As used in this Agreement, "Software" means the object code version of | |||
the applicable elastic data management server software provided by | |||
Couchbase Inc. | |||
2. Restrictions. Licensee will not reverse engineer, disassemble, or | |||
decompile the Software (except to the extent such restrictions are | |||
prohibited by law). | |||
3. Support. Couchbase, Inc. will provide Licensee with access to, and | |||
use of, the Couchbase, Inc. support forum available at the following | |||
URL: http://www.couchbase.org/forums/. Couchbase, Inc. may, at its | |||
discretion, modify, suspend or terminate support at any time upon notice | |||
to Licensee. | |||
4. Warranty Disclaimer and Limitation of Liability. THE SOFTWARE IS | |||
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, | |||
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS | |||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
COUCHBASE INC. OR THE AUTHORS OR COPYRIGHT HOLDERS IN THE SOFTWARE BE | |||
LIABLE FOR ANY CLAIM, DAMAGES (IINCLUDING, WITHOUT LIMITATION, DIRECT, | |||
INDIRECT OR CONSEQUENTIAL DAMAGES) OR OTHER LIABILITY, WHETHER IN AN | |||
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |||
SOFTWARE. |
@ -0,0 +1,481 @@ | |||
// Copyright (c) 2016 Couchbase, Inc. | |||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file | |||
// except in compliance with the License. You may obtain a copy of the License at | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// Unless required by applicable law or agreed to in writing, software distributed under the | |||
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, | |||
// either express or implied. See the License for the specific language governing permissions | |||
// and limitations under the License. | |||
package logging | |||
import ( | |||
"os" | |||
"runtime" | |||
"strings" | |||
"sync" | |||
) | |||
// Level is the logging verbosity, ordered from NONE (0, logging
// disabled) up to DEBUG (most verbose). A message is emitted when its
// level is <= the logger's configured level.
type Level int

const (
	NONE    = Level(iota) // Disable all logging
	FATAL                 // System is in severe error state and has to abort
	SEVERE                // System is in severe error state and cannot recover reliably
	ERROR                 // System is in error state but can recover and continue reliably
	WARN                  // System approaching error state, or is in a correct but undesirable state
	INFO                  // System-level events and status, in correct states
	REQUEST               // Request-level events, with request-specific rlevel
	TRACE                 // Trace detailed system execution, e.g. function entry / exit
	DEBUG                 // Debug
)
// LogEntryFormatter selects how log entries are rendered (see
// NewLogger: text is the default, with JSON and key=value variants).
type LogEntryFormatter int

const (
	TEXTFORMATTER = LogEntryFormatter(iota)
	JSONFORMATTER
	KVFORMATTER
)
func (level Level) String() string { | |||
return _LEVEL_NAMES[level] | |||
} | |||
// _LEVEL_NAMES maps each Level value (as index) to its display name.
var _LEVEL_NAMES = []string{
	DEBUG:   "DEBUG",
	TRACE:   "TRACE",
	REQUEST: "REQUEST",
	INFO:    "INFO",
	WARN:    "WARN",
	ERROR:   "ERROR",
	SEVERE:  "SEVERE",
	FATAL:   "FATAL",
	NONE:    "NONE",
}
// _LEVEL_MAP maps lower-case level names back to Level values
// (used by ParseLevel).
var _LEVEL_MAP = map[string]Level{
	"debug":   DEBUG,
	"trace":   TRACE,
	"request": REQUEST,
	"info":    INFO,
	"warn":    WARN,
	"error":   ERROR,
	"severe":  SEVERE,
	"fatal":   FATAL,
	"none":    NONE,
}
func ParseLevel(name string) (level Level, ok bool) { | |||
level, ok = _LEVEL_MAP[strings.ToLower(name)] | |||
return | |||
} | |||
/*
Pair supports logging of key-value pairs. Keys beginning with _ are
reserved for the logger, e.g. _time, _level, _msg, and _rlevel. The
Pair APIs are designed to avoid heap allocation and garbage
collection.
*/
type Pairs []Pair
type Pair struct {
	Name  string
	Value interface{}
}
/*
Map allows key-value pairs to be specified using map literals or data
structures. For example:

Errorm(msg, Map{...})

Map incurs heap allocation and garbage collection, so the Pair APIs
should be preferred.
*/
type Map map[string]interface{}
// Logger provides a common interface for logging libraries.
// Implementations decide formatting; callers pick the API family by
// how they pass fields (Pair variadics, a Map, or a format string).
type Logger interface {
	/*
		These APIs write all the given pairs in addition to standard logger keys.
	*/
	Logp(level Level, msg string, kv ...Pair)

	Debugp(msg string, kv ...Pair)

	Tracep(msg string, kv ...Pair)

	Requestp(rlevel Level, msg string, kv ...Pair)

	Infop(msg string, kv ...Pair)

	Warnp(msg string, kv ...Pair)

	Errorp(msg string, kv ...Pair)

	Severep(msg string, kv ...Pair)

	Fatalp(msg string, kv ...Pair)

	/*
		These APIs write the fields in the given kv Map in addition to standard logger keys.
	*/
	Logm(level Level, msg string, kv Map)

	Debugm(msg string, kv Map)

	Tracem(msg string, kv Map)

	Requestm(rlevel Level, msg string, kv Map)

	Infom(msg string, kv Map)

	Warnm(msg string, kv Map)

	Errorm(msg string, kv Map)

	Severem(msg string, kv Map)

	Fatalm(msg string, kv Map)

	/*
		These APIs only write _msg, _time, _level, and other logger keys. If
		the msg contains other fields, use the Pair or Map APIs instead.
	*/
	Logf(level Level, fmt string, args ...interface{})

	Debugf(fmt string, args ...interface{})

	Tracef(fmt string, args ...interface{})

	Requestf(rlevel Level, fmt string, args ...interface{})

	Infof(fmt string, args ...interface{})

	Warnf(fmt string, args ...interface{})

	Errorf(fmt string, args ...interface{})

	Severef(fmt string, args ...interface{})

	Fatalf(fmt string, args ...interface{})

	/*
		These APIs control the logging level
	*/

	SetLevel(Level) // Set the logging level

	Level() Level // Get the current logging level
}
// logger is the process-wide Logger; nil disables all logging.
var logger Logger = nil

// curLevel caches logger.Level() so skipLogging can test it without
// taking loggerMutex (see the comment on skipLogging).
var curLevel Level = DEBUG // initially set to never skip

// loggerMutex guards logger and serializes all calls through it.
var loggerMutex sync.RWMutex
// All the methods below first acquire the mutex (mostly in exclusive mode) | |||
// and only then check if logging at the current level is enabled. | |||
// This introduces a fair bottleneck for those log entries that should be | |||
// skipped (the majority, at INFO or below levels) | |||
// We try to predict here if we should lock the mutex at all by caching | |||
// the current log level: while dynamically changing logger, there might | |||
// be the odd entry skipped as the new level is cached. | |||
// Since we seem to never change the logger, this is not an issue. | |||
func skipLogging(level Level) bool { | |||
if logger == nil { | |||
return true | |||
} | |||
return level > curLevel | |||
} | |||
func SetLogger(newLogger Logger) { | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger = newLogger | |||
if logger == nil { | |||
curLevel = NONE | |||
} else { | |||
curLevel = newLogger.Level() | |||
} | |||
} | |||
func Logp(level Level, msg string, kv ...Pair) { | |||
if skipLogging(level) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Logp(level, msg, kv...) | |||
} | |||
func Debugp(msg string, kv ...Pair) { | |||
if skipLogging(DEBUG) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Debugp(msg, kv...) | |||
} | |||
func Tracep(msg string, kv ...Pair) { | |||
if skipLogging(TRACE) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Tracep(msg, kv...) | |||
} | |||
func Requestp(rlevel Level, msg string, kv ...Pair) { | |||
if skipLogging(REQUEST) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Requestp(rlevel, msg, kv...) | |||
} | |||
func Infop(msg string, kv ...Pair) { | |||
if skipLogging(INFO) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Infop(msg, kv...) | |||
} | |||
func Warnp(msg string, kv ...Pair) { | |||
if skipLogging(WARN) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Warnp(msg, kv...) | |||
} | |||
func Errorp(msg string, kv ...Pair) { | |||
if skipLogging(ERROR) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Errorp(msg, kv...) | |||
} | |||
func Severep(msg string, kv ...Pair) { | |||
if skipLogging(SEVERE) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Severep(msg, kv...) | |||
} | |||
func Fatalp(msg string, kv ...Pair) { | |||
if skipLogging(FATAL) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Fatalp(msg, kv...) | |||
} | |||
func Logm(level Level, msg string, kv Map) { | |||
if skipLogging(level) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Logm(level, msg, kv) | |||
} | |||
func Debugm(msg string, kv Map) { | |||
if skipLogging(DEBUG) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Debugm(msg, kv) | |||
} | |||
func Tracem(msg string, kv Map) { | |||
if skipLogging(TRACE) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Tracem(msg, kv) | |||
} | |||
func Requestm(rlevel Level, msg string, kv Map) { | |||
if skipLogging(REQUEST) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Requestm(rlevel, msg, kv) | |||
} | |||
func Infom(msg string, kv Map) { | |||
if skipLogging(INFO) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Infom(msg, kv) | |||
} | |||
func Warnm(msg string, kv Map) { | |||
if skipLogging(WARN) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Warnm(msg, kv) | |||
} | |||
func Errorm(msg string, kv Map) { | |||
if skipLogging(ERROR) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Errorm(msg, kv) | |||
} | |||
func Severem(msg string, kv Map) { | |||
if skipLogging(SEVERE) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Severem(msg, kv) | |||
} | |||
func Fatalm(msg string, kv Map) { | |||
if skipLogging(FATAL) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Fatalm(msg, kv) | |||
} | |||
func Logf(level Level, fmt string, args ...interface{}) { | |||
if skipLogging(level) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Logf(level, fmt, args...) | |||
} | |||
func Debugf(fmt string, args ...interface{}) { | |||
if skipLogging(DEBUG) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Debugf(fmt, args...) | |||
} | |||
func Tracef(fmt string, args ...interface{}) { | |||
if skipLogging(TRACE) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Tracef(fmt, args...) | |||
} | |||
func Requestf(rlevel Level, fmt string, args ...interface{}) { | |||
if skipLogging(REQUEST) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Requestf(rlevel, fmt, args...) | |||
} | |||
func Infof(fmt string, args ...interface{}) { | |||
if skipLogging(INFO) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Infof(fmt, args...) | |||
} | |||
func Warnf(fmt string, args ...interface{}) { | |||
if skipLogging(WARN) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Warnf(fmt, args...) | |||
} | |||
func Errorf(fmt string, args ...interface{}) { | |||
if skipLogging(ERROR) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Errorf(fmt, args...) | |||
} | |||
func Severef(fmt string, args ...interface{}) { | |||
if skipLogging(SEVERE) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Severef(fmt, args...) | |||
} | |||
func Fatalf(fmt string, args ...interface{}) { | |||
if skipLogging(FATAL) { | |||
return | |||
} | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Fatalf(fmt, args...) | |||
} | |||
func SetLevel(level Level) { | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.SetLevel(level) | |||
curLevel = level | |||
} | |||
func LogLevel() Level { | |||
loggerMutex.RLock() | |||
defer loggerMutex.RUnlock() | |||
return logger.Level() | |||
} | |||
func Stackf(level Level, fmt string, args ...interface{}) { | |||
if skipLogging(level) { | |||
return | |||
} | |||
buf := make([]byte, 1<<16) | |||
n := runtime.Stack(buf, false) | |||
s := string(buf[0:n]) | |||
loggerMutex.Lock() | |||
defer loggerMutex.Unlock() | |||
logger.Logf(level, fmt, args...) | |||
logger.Logf(level, s) | |||
} | |||
func init() { | |||
logger = NewLogger(os.Stderr, INFO, TEXTFORMATTER) | |||
SetLogger(logger) | |||
} |
@ -0,0 +1,318 @@ | |||
// Copyright (c) 2016 Couchbase, Inc. | |||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file | |||
// except in compliance with the License. You may obtain a copy of the License at | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// Unless required by applicable law or agreed to in writing, software distributed under the | |||
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, | |||
// either express or implied. See the License for the specific language governing permissions | |||
// and limitations under the License. | |||
package logging | |||
import ( | |||
"bytes" | |||
"encoding/json" | |||
"fmt" | |||
"io" | |||
"log" | |||
"time" | |||
) | |||
// goLogger is a Logger backed by the standard library's log.Logger,
// with a pluggable per-entry formatter and a level threshold.
type goLogger struct {
	logger *log.Logger
	// level is the threshold: entries above it are dropped.
	level Level
	// entryFormatter renders each logEntry to a string (text/JSON/KV).
	entryFormatter formatter
}
// Reserved field names emitted by the formatters for the standard
// entry attributes (underscore-prefixed keys are reserved; see Pair).
const (
	_LEVEL  = "_level"
	_MSG    = "_msg"
	_TIME   = "_time"
	_RLEVEL = "_rlevel"
)
func NewLogger(out io.Writer, lvl Level, fmtLogging LogEntryFormatter) *goLogger { | |||
logger := &goLogger{ | |||
logger: log.New(out, "", 0), | |||
level: lvl, | |||
} | |||
if fmtLogging == JSONFORMATTER { | |||
logger.entryFormatter = &jsonFormatter{} | |||
} else if fmtLogging == KVFORMATTER { | |||
logger.entryFormatter = &keyvalueFormatter{} | |||
} else { | |||
logger.entryFormatter = &textFormatter{} | |||
} | |||
return logger | |||
} | |||
func (gl *goLogger) Logp(level Level, msg string, kv ...Pair) { | |||
if gl.logger == nil { | |||
return | |||
} | |||
if level <= gl.level { | |||
e := newLogEntry(msg, level) | |||
copyPairs(e, kv) | |||
gl.log(e) | |||
} | |||
} | |||
// Debugp logs msg plus pairs at DEBUG.
func (gl *goLogger) Debugp(msg string, kv ...Pair) {
	gl.Logp(DEBUG, msg, kv...)
}

// Tracep logs msg plus pairs at TRACE.
func (gl *goLogger) Tracep(msg string, kv ...Pair) {
	gl.Logp(TRACE, msg, kv...)
}
func (gl *goLogger) Requestp(rlevel Level, msg string, kv ...Pair) { | |||
if gl.logger == nil { | |||
return | |||
} | |||
if REQUEST <= gl.level { | |||
e := newLogEntry(msg, REQUEST) | |||
e.Rlevel = rlevel | |||
copyPairs(e, kv) | |||
gl.log(e) | |||
} | |||
} | |||
// Infop logs msg plus pairs at INFO.
func (gl *goLogger) Infop(msg string, kv ...Pair) {
	gl.Logp(INFO, msg, kv...)
}

// Warnp logs msg plus pairs at WARN.
func (gl *goLogger) Warnp(msg string, kv ...Pair) {
	gl.Logp(WARN, msg, kv...)
}

// Errorp logs msg plus pairs at ERROR.
func (gl *goLogger) Errorp(msg string, kv ...Pair) {
	gl.Logp(ERROR, msg, kv...)
}

// Severep logs msg plus pairs at SEVERE.
func (gl *goLogger) Severep(msg string, kv ...Pair) {
	gl.Logp(SEVERE, msg, kv...)
}

// Fatalp logs msg plus pairs at FATAL.
func (gl *goLogger) Fatalp(msg string, kv ...Pair) {
	gl.Logp(FATAL, msg, kv...)
}
func (gl *goLogger) Logm(level Level, msg string, kv Map) { | |||
if gl.logger == nil { | |||
return | |||
} | |||
if level <= gl.level { | |||
e := newLogEntry(msg, level) | |||
e.Data = kv | |||
gl.log(e) | |||
} | |||
} | |||
// Debugm logs msg plus kv at DEBUG.
func (gl *goLogger) Debugm(msg string, kv Map) {
	gl.Logm(DEBUG, msg, kv)
}

// Tracem logs msg plus kv at TRACE.
func (gl *goLogger) Tracem(msg string, kv Map) {
	gl.Logm(TRACE, msg, kv)
}
func (gl *goLogger) Requestm(rlevel Level, msg string, kv Map) { | |||
if gl.logger == nil { | |||
return | |||
} | |||
if REQUEST <= gl.level { | |||
e := newLogEntry(msg, REQUEST) | |||
e.Rlevel = rlevel | |||
e.Data = kv | |||
gl.log(e) | |||
} | |||
} | |||
// Infom logs msg plus kv at INFO.
func (gl *goLogger) Infom(msg string, kv Map) {
	gl.Logm(INFO, msg, kv)
}

// Warnm logs msg plus kv at WARN.
func (gl *goLogger) Warnm(msg string, kv Map) {
	gl.Logm(WARN, msg, kv)
}

// Errorm logs msg plus kv at ERROR.
func (gl *goLogger) Errorm(msg string, kv Map) {
	gl.Logm(ERROR, msg, kv)
}

// Severem logs msg plus kv at SEVERE.
func (gl *goLogger) Severem(msg string, kv Map) {
	gl.Logm(SEVERE, msg, kv)
}

// Fatalm logs msg plus kv at FATAL.
func (gl *goLogger) Fatalm(msg string, kv Map) {
	gl.Logm(FATAL, msg, kv)
}
func (gl *goLogger) Logf(level Level, format string, args ...interface{}) { | |||
if gl.logger == nil { | |||
return | |||
} | |||
if level <= gl.level { | |||
e := newLogEntry(fmt.Sprintf(format, args...), level) | |||
gl.log(e) | |||
} | |||
} | |||
// Debugf logs a formatted message at DEBUG.
func (gl *goLogger) Debugf(format string, args ...interface{}) {
	gl.Logf(DEBUG, format, args...)
}

// Tracef logs a formatted message at TRACE.
func (gl *goLogger) Tracef(format string, args ...interface{}) {
	gl.Logf(TRACE, format, args...)
}
func (gl *goLogger) Requestf(rlevel Level, format string, args ...interface{}) { | |||
if gl.logger == nil { | |||
return | |||
} | |||
if REQUEST <= gl.level { | |||
e := newLogEntry(fmt.Sprintf(format, args...), REQUEST) | |||
e.Rlevel = rlevel | |||
gl.log(e) | |||
} | |||
} | |||
// Infof logs a formatted message at INFO.
func (gl *goLogger) Infof(format string, args ...interface{}) {
	gl.Logf(INFO, format, args...)
}

// Warnf logs a formatted message at WARN.
func (gl *goLogger) Warnf(format string, args ...interface{}) {
	gl.Logf(WARN, format, args...)
}

// Errorf logs a formatted message at ERROR.
func (gl *goLogger) Errorf(format string, args ...interface{}) {
	gl.Logf(ERROR, format, args...)
}

// Severef logs a formatted message at SEVERE.
func (gl *goLogger) Severef(format string, args ...interface{}) {
	gl.Logf(SEVERE, format, args...)
}

// Fatalf logs a formatted message at FATAL.
func (gl *goLogger) Fatalf(format string, args ...interface{}) {
	gl.Logf(FATAL, format, args...)
}
// Level returns the logger's current severity threshold.
func (gl *goLogger) Level() Level {
	return gl.level
}
// SetLevel sets the severity threshold; entries above it are dropped.
// NOTE(review): not synchronized — confirm callers configure it before
// logging concurrently.
func (gl *goLogger) SetLevel(level Level) {
	gl.level = level
}
// log renders the entry through the configured formatter and writes it
// to the underlying go logger.
func (gl *goLogger) log(newEntry *logEntry) {
	s := gl.entryFormatter.format(newEntry)
	gl.logger.Print(s)
}
// logEntry is a single log record prior to formatting.
type logEntry struct {
	Time    string // pre-formatted timestamp (see newLogEntry)
	Level   Level
	Rlevel  Level // request level; NONE unless emitted via Request*()
	Message string
	Data    Map // optional structured key/value payload
}
func newLogEntry(msg string, level Level) *logEntry { | |||
return &logEntry{ | |||
Time: time.Now().Format("2006-01-02T15:04:05.000-07:00"), // time.RFC3339 with milliseconds | |||
Level: level, | |||
Rlevel: NONE, | |||
Message: msg, | |||
} | |||
} | |||
func copyPairs(newEntry *logEntry, pairs []Pair) { | |||
newEntry.Data = make(Map, len(pairs)) | |||
for _, p := range pairs { | |||
newEntry.Data[p.Name] = p.Value | |||
} | |||
} | |||
// formatter renders a log entry into the final string handed to the
// underlying logger.
type formatter interface {
	format(*logEntry) string
}
// textFormatter renders entries as human-readable plain text.
type textFormatter struct {
}
// ex. 2016-02-10T09:15:25.498-08:00 [INFO] This is a message from test in text format | |||
func (*textFormatter) format(newEntry *logEntry) string { | |||
b := &bytes.Buffer{} | |||
appendValue(b, newEntry.Time) | |||
if newEntry.Rlevel != NONE { | |||
fmt.Fprintf(b, "[%s,%s] ", newEntry.Level.String(), newEntry.Rlevel.String()) | |||
} else { | |||
fmt.Fprintf(b, "[%s] ", newEntry.Level.String()) | |||
} | |||
appendValue(b, newEntry.Message) | |||
for key, value := range newEntry.Data { | |||
appendKeyValue(b, key, value) | |||
} | |||
b.WriteByte('\n') | |||
s := bytes.NewBuffer(b.Bytes()) | |||
return s.String() | |||
} | |||
func appendValue(b *bytes.Buffer, value interface{}) { | |||
if _, ok := value.(string); ok { | |||
fmt.Fprintf(b, "%s ", value) | |||
} else { | |||
fmt.Fprintf(b, "%v ", value) | |||
} | |||
} | |||
// keyvalueFormatter renders entries as space-separated key=value pairs.
type keyvalueFormatter struct {
}
// ex. _time=2016-02-10T09:15:25.498-08:00 _level=INFO _msg=This is a message from test in key-value format | |||
func (*keyvalueFormatter) format(newEntry *logEntry) string { | |||
b := &bytes.Buffer{} | |||
appendKeyValue(b, _TIME, newEntry.Time) | |||
appendKeyValue(b, _LEVEL, newEntry.Level.String()) | |||
if newEntry.Rlevel != NONE { | |||
appendKeyValue(b, _RLEVEL, newEntry.Rlevel.String()) | |||
} | |||
appendKeyValue(b, _MSG, newEntry.Message) | |||
for key, value := range newEntry.Data { | |||
appendKeyValue(b, key, value) | |||
} | |||
b.WriteByte('\n') | |||
s := bytes.NewBuffer(b.Bytes()) | |||
return s.String() | |||
} | |||
func appendKeyValue(b *bytes.Buffer, key, value interface{}) { | |||
if _, ok := value.(string); ok { | |||
fmt.Fprintf(b, "%v=%s ", key, value) | |||
} else { | |||
fmt.Fprintf(b, "%v=%v ", key, value) | |||
} | |||
} | |||
// jsonFormatter renders entries as single-line JSON objects.
type jsonFormatter struct {
}
// ex. {"_level":"INFO","_msg":"This is a message from test in json format","_time":"2016-02-10T09:12:59.518-08:00"} | |||
func (*jsonFormatter) format(newEntry *logEntry) string { | |||
if newEntry.Data == nil { | |||
newEntry.Data = make(Map, 5) | |||
} | |||
newEntry.Data[_TIME] = newEntry.Time | |||
newEntry.Data[_LEVEL] = newEntry.Level.String() | |||
if newEntry.Rlevel != NONE { | |||
newEntry.Data[_RLEVEL] = newEntry.Rlevel.String() | |||
} | |||
newEntry.Data[_MSG] = newEntry.Message | |||
serialized, _ := json.Marshal(newEntry.Data) | |||
s := bytes.NewBuffer(append(serialized, '\n')) | |||
return s.String() | |||
} |
@ -0,0 +1,207 @@ | |||
// @author Couchbase <info@couchbase.com> | |||
// @copyright 2018 Couchbase, Inc. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Package scramsha provides implementation of client side SCRAM-SHA | |||
// according to https://tools.ietf.org/html/rfc5802 | |||
package scramsha | |||
import ( | |||
"crypto/hmac" | |||
"crypto/rand" | |||
"crypto/sha1" | |||
"crypto/sha256" | |||
"crypto/sha512" | |||
"encoding/base64" | |||
"fmt" | |||
"github.com/pkg/errors" | |||
"golang.org/x/crypto/pbkdf2" | |||
"hash" | |||
"strconv" | |||
"strings" | |||
) | |||
func hmacHash(message []byte, secret []byte, hashFunc func() hash.Hash) []byte { | |||
h := hmac.New(hashFunc, secret) | |||
h.Write(message) | |||
return h.Sum(nil) | |||
} | |||
func shaHash(message []byte, hashFunc func() hash.Hash) []byte { | |||
h := hashFunc() | |||
h.Write(message) | |||
return h.Sum(nil) | |||
} | |||
func generateClientNonce(size int) (string, error) { | |||
randomBytes := make([]byte, size) | |||
_, err := rand.Read(randomBytes) | |||
if err != nil { | |||
return "", errors.Wrap(err, "Unable to generate nonce") | |||
} | |||
return base64.StdEncoding.EncodeToString(randomBytes), nil | |||
} | |||
// ScramSha provides context for SCRAM-SHA handling
type ScramSha struct {
	hashSize       int              // digest size in bytes of the chosen hash
	hashFunc       func() hash.Hash // hash constructor (sha1/sha256/sha512.New)
	clientNonce    string
	serverNonce    string // must extend clientNonce (checked in HandleStartResponse)
	salt           []byte
	i              int // PBKDF2 iteration count announced by the server
	saltedPassword []byte
	authMessage    string // running concatenation of the exchanged messages
}
var knownMethods = []string{"SCRAM-SHA512", "SCRAM-SHA256", "SCRAM-SHA1"} | |||
// BestMethod returns SCRAM-SHA method we consider the best out of suggested | |||
// by server | |||
func BestMethod(methods string) (string, error) { | |||
for _, m := range knownMethods { | |||
if strings.Index(methods, m) != -1 { | |||
return m, nil | |||
} | |||
} | |||
return "", errors.Errorf( | |||
"None of the server suggested methods [%s] are supported", | |||
methods) | |||
} | |||
// NewScramSha creates context for SCRAM-SHA handling | |||
func NewScramSha(method string) (*ScramSha, error) { | |||
s := &ScramSha{} | |||
if method == knownMethods[0] { | |||
s.hashFunc = sha512.New | |||
s.hashSize = 64 | |||
} else if method == knownMethods[1] { | |||
s.hashFunc = sha256.New | |||
s.hashSize = 32 | |||
} else if method == knownMethods[2] { | |||
s.hashFunc = sha1.New | |||
s.hashSize = 20 | |||
} else { | |||
return nil, errors.Errorf("Unsupported method %s", method) | |||
} | |||
return s, nil | |||
} | |||
// GetStartRequest builds start SCRAM-SHA request to be sent to server.
// The message has the form "n,,n=<user>,r=<nonce>"; the leading "n,,"
// is the GS2 header, which RFC 5802 excludes from the auth message —
// hence the [3:] slice below.
func (s *ScramSha) GetStartRequest(user string) (string, error) {
	var err error
	s.clientNonce, err = generateClientNonce(24)
	if err != nil {
		return "", errors.Wrapf(err, "Unable to generate SCRAM-SHA "+
			"start request for user %s", user)
	}
	message := fmt.Sprintf("n,,n=%s,r=%s", user, s.clientNonce)
	s.authMessage = message[3:]
	return message, nil
}
// HandleStartResponse handles server response on start SCRAM-SHA request.
// Expected form: "r=<nonce>,s=<base64 salt>,i=<iterations>". Each field
// is validated, the server nonce must extend our client nonce, and the
// raw response is appended to the running auth message.
func (s *ScramSha) HandleStartResponse(response string) error {
	parts := strings.Split(response, ",")
	if len(parts) != 3 {
		// NOTE(review): message says SCRAM-SHA-1 regardless of the
		// negotiated method — confirm whether that is intentional.
		return errors.Errorf("expected 3 fields in first SCRAM-SHA-1 "+
			"server message %s", response)
	}
	// len(...) < 3 ensures there is at least one character after "r=".
	if !strings.HasPrefix(parts[0], "r=") || len(parts[0]) < 3 {
		return errors.Errorf("Server sent an invalid nonce %s",
			parts[0])
	}
	if !strings.HasPrefix(parts[1], "s=") || len(parts[1]) < 3 {
		return errors.Errorf("Server sent an invalid salt %s", parts[1])
	}
	if !strings.HasPrefix(parts[2], "i=") || len(parts[2]) < 3 {
		return errors.Errorf("Server sent an invalid iteration count %s",
			parts[2])
	}
	s.serverNonce = parts[0][2:]
	encodedSalt := parts[1][2:]
	var err error
	s.i, err = strconv.Atoi(parts[2][2:])
	if err != nil {
		return errors.Errorf("Iteration count %s must be integer.",
			parts[2][2:])
	}
	if s.i < 1 {
		return errors.New("Iteration count should be positive")
	}
	// RFC 5802: the server nonce must begin with the client nonce,
	// binding the response to our exchange.
	if !strings.HasPrefix(s.serverNonce, s.clientNonce) {
		return errors.Errorf("Server nonce %s doesn't contain client"+
			" nonce %s", s.serverNonce, s.clientNonce)
	}
	s.salt, err = base64.StdEncoding.DecodeString(encodedSalt)
	if err != nil {
		return errors.Wrapf(err, "Unable to decode salt %s",
			encodedSalt)
	}
	s.authMessage = s.authMessage + "," + response
	return nil
}
// GetFinalRequest builds final SCRAM-SHA request to be sent to server.
// Per RFC 5802: SaltedPassword = PBKDF2(pass, salt, i), ClientKey =
// HMAC(SaltedPassword, "Client Key"), StoredKey = H(ClientKey), and the
// proof p = ClientKey XOR HMAC(StoredKey, authMessage). "c=biws" is the
// base64 encoding of the GS2 header "n,,".
func (s *ScramSha) GetFinalRequest(pass string) string {
	clientFinalMessageBare := "c=biws,r=" + s.serverNonce
	// The bare final message joins the auth message before signing.
	s.authMessage = s.authMessage + "," + clientFinalMessageBare
	s.saltedPassword = pbkdf2.Key([]byte(pass), s.salt, s.i,
		s.hashSize, s.hashFunc)
	clientKey := hmacHash([]byte("Client Key"), s.saltedPassword, s.hashFunc)
	storedKey := shaHash(clientKey, s.hashFunc)
	clientSignature := hmacHash([]byte(s.authMessage), storedKey, s.hashFunc)
	clientProof := make([]byte, len(clientSignature))
	for i := 0; i < len(clientSignature); i++ {
		clientProof[i] = clientKey[i] ^ clientSignature[i]
	}
	return clientFinalMessageBare + ",p=" +
		base64.StdEncoding.EncodeToString(clientProof)
}
// HandleFinalResponse handles server's response on final SCRAM-SHA request | |||
func (s *ScramSha) HandleFinalResponse(response string) error { | |||
if strings.Contains(response, ",") || | |||
!strings.HasPrefix(response, "v=") { | |||
return errors.Errorf("Server sent an invalid final message %s", | |||
response) | |||
} | |||
decodedMessage, err := base64.StdEncoding.DecodeString(response[2:]) | |||
if err != nil { | |||
return errors.Wrapf(err, "Unable to decode server message %s", | |||
response[2:]) | |||
} | |||
serverKey := hmacHash([]byte("Server Key"), s.saltedPassword, | |||
s.hashFunc) | |||
serverSignature := hmacHash([]byte(s.authMessage), serverKey, | |||
s.hashFunc) | |||
if string(decodedMessage) != string(serverSignature) { | |||
return errors.Errorf("Server proof %s doesn't match "+ | |||
"the expected: %s", | |||
string(decodedMessage), string(serverSignature)) | |||
} | |||
return nil | |||
} |
@ -0,0 +1,252 @@ | |||
// @author Couchbase <info@couchbase.com> | |||
// @copyright 2018 Couchbase, Inc. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Package scramsha provides implementation of client side SCRAM-SHA | |||
// via Http according to https://tools.ietf.org/html/rfc7804 | |||
package scramsha | |||
import ( | |||
"encoding/base64" | |||
"github.com/pkg/errors" | |||
"io" | |||
"io/ioutil" | |||
"net/http" | |||
"strings" | |||
) | |||
// consts used to parse scramsha response from target
const (
	WWWAuthenticate    = "WWW-Authenticate"    // response header carrying the server challenge
	AuthenticationInfo = "Authentication-Info" // response header carrying the server's final proof
	Authorization      = "Authorization"       // request header carrying the client messages
	DataPrefix         = "data="               // prefix of the base64 payload field
	SidPrefix          = "sid="                // prefix of the session id field
)
// Request provides implementation of http request that can be retried
type Request struct {
	// body is kept separately so it can be rewound (Seek) and resent.
	body io.ReadSeeker
	// Embed an HTTP request directly. This makes a *Request act exactly
	// like an *http.Request so that all meta methods are supported.
	*http.Request
}
// lenReader is satisfied by bodies that can report their size (e.g.
// bytes.Reader, strings.Reader); used to set Content-Length up front.
type lenReader interface {
	Len() int
}
// NewRequest creates http request that can be retried | |||
func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) { | |||
// Wrap the body in a noop ReadCloser if non-nil. This prevents the | |||
// reader from being closed by the HTTP client. | |||
var rcBody io.ReadCloser | |||
if body != nil { | |||
rcBody = ioutil.NopCloser(body) | |||
} | |||
// Make the request with the noop-closer for the body. | |||
httpReq, err := http.NewRequest(method, url, rcBody) | |||
if err != nil { | |||
return nil, err | |||
} | |||
// Check if we can set the Content-Length automatically. | |||
if lr, ok := body.(lenReader); ok { | |||
httpReq.ContentLength = int64(lr.Len()) | |||
} | |||
return &Request{body, httpReq}, nil | |||
} | |||
// encode returns the standard base64 encoding of str.
func encode(str string) string {
	raw := []byte(str)
	return base64.StdEncoding.EncodeToString(raw)
}
func decode(str string) (string, error) { | |||
bytes, err := base64.StdEncoding.DecodeString(str) | |||
if err != nil { | |||
return "", errors.Errorf("Cannot base64 decode %s", | |||
str) | |||
} | |||
return string(bytes), err | |||
} | |||
func trimPrefix(s, prefix string) (string, error) { | |||
l := len(s) | |||
trimmed := strings.TrimPrefix(s, prefix) | |||
if l == len(trimmed) { | |||
return trimmed, errors.Errorf("Prefix %s not found in %s", | |||
prefix, s) | |||
} | |||
return trimmed, nil | |||
} | |||
// drainBody discards any unread portion of the response body and
// closes it so the underlying connection can be reused by the
// transport.
func drainBody(resp *http.Response) {
	defer resp.Body.Close()
	io.Copy(ioutil.Discard, resp.Body) // best effort; error deliberately ignored
}
// DoScramSha performs SCRAM-SHA handshake via Http
// (RFC 7804): it sends the start request, handles the server's
// challenge from WWW-Authenticate, then resends the original request
// with the client proof and verifies the server signature from
// Authentication-Info. Early returns hand the raw response back when
// authentication does not apply (non-401, unknown user, server error).
func DoScramSha(req *Request,
	username string,
	password string,
	client *http.Client) (*http.Response, error) {
	// HTTP mechanism name carries a dash ("SCRAM-SHA-512") while the
	// internal handler key does not; both spellings are intentional.
	method := "SCRAM-SHA-512"
	s, err := NewScramSha("SCRAM-SHA512")
	if err != nil {
		return nil, errors.Wrap(err,
			"Unable to initialize SCRAM-SHA handler")
	}
	message, err := s.GetStartRequest(username)
	if err != nil {
		return nil, err
	}
	encodedMessage := method + " " + DataPrefix + encode(message)
	req.Header.Set(Authorization, encodedMessage)
	res, err := client.Do(req.Request)
	if err != nil {
		// NOTE(review): "start"+"request" concatenates without a
		// space in the error text; left unchanged here.
		return nil, errors.Wrap(err, "Problem sending SCRAM-SHA start"+
			"request")
	}
	if res.StatusCode != http.StatusUnauthorized {
		// Target did not challenge us; return the response as-is.
		return res, nil
	}
	authHeader := res.Header.Get(WWWAuthenticate)
	if authHeader == "" {
		drainBody(res)
		return nil, errors.Errorf("Header %s is not populated in "+
			"SCRAM-SHA start response", WWWAuthenticate)
	}
	authHeader, err = trimPrefix(authHeader, method+" ")
	if err != nil {
		if strings.HasPrefix(authHeader, "Basic ") {
			// user not found
			return res, nil
		}
		drainBody(res)
		return nil, errors.Wrapf(err, "Error while parsing SCRAM-SHA "+
			"start response %s", authHeader)
	}
	drainBody(res)
	sid, response, err := parseSidAndData(authHeader)
	if err != nil {
		return nil, errors.Wrapf(err, "Error while parsing SCRAM-SHA "+
			"start response %s", authHeader)
	}
	err = s.HandleStartResponse(response)
	if err != nil {
		return nil, errors.Wrapf(err, "Error parsing SCRAM-SHA start "+
			"response %s", response)
	}
	message = s.GetFinalRequest(password)
	encodedMessage = method + " " + SidPrefix + sid + "," + DataPrefix +
		encode(message)
	req.Header.Set(Authorization, encodedMessage)
	// rewind request body so it can be resent again
	if req.body != nil {
		if _, err = req.body.Seek(0, 0); err != nil {
			return nil, errors.Errorf("Failed to seek body: %v",
				err)
		}
	}
	res, err = client.Do(req.Request)
	if err != nil {
		return nil, errors.Wrap(err, "Problem sending SCRAM-SHA final"+
			"request")
	}
	if res.StatusCode == http.StatusUnauthorized {
		// TODO retrieve and return error
		return res, nil
	}
	if res.StatusCode >= http.StatusInternalServerError {
		// in this case we cannot expect server to set headers properly
		return res, nil
	}
	authHeader = res.Header.Get(AuthenticationInfo)
	if authHeader == "" {
		drainBody(res)
		return nil, errors.Errorf("Header %s is not populated in "+
			"SCRAM-SHA final response", AuthenticationInfo)
	}
	finalSid, response, err := parseSidAndData(authHeader)
	if err != nil {
		drainBody(res)
		return nil, errors.Wrapf(err, "Error while parsing SCRAM-SHA "+
			"final response %s", authHeader)
	}
	// The session id must be stable across the whole exchange.
	if finalSid != sid {
		drainBody(res)
		return nil, errors.Errorf("Sid %s returned by server "+
			"doesn't match the original sid %s", finalSid, sid)
	}
	err = s.HandleFinalResponse(response)
	if err != nil {
		drainBody(res)
		return nil, errors.Wrapf(err,
			"Error handling SCRAM-SHA final server response %s",
			response)
	}
	return res, nil
}
func parseSidAndData(authHeader string) (string, string, error) { | |||
sidIndex := strings.Index(authHeader, SidPrefix) | |||
if sidIndex < 0 { | |||
return "", "", errors.Errorf("Cannot find %s in %s", | |||
SidPrefix, authHeader) | |||
} | |||
sidEndIndex := strings.Index(authHeader, ",") | |||
if sidEndIndex < 0 { | |||
return "", "", errors.Errorf("Cannot find ',' in %s", | |||
authHeader) | |||
} | |||
sid := authHeader[sidIndex+len(SidPrefix) : sidEndIndex] | |||
dataIndex := strings.Index(authHeader, DataPrefix) | |||
if dataIndex < 0 { | |||
return "", "", errors.Errorf("Cannot find %s in %s", | |||
DataPrefix, authHeader) | |||
} | |||
data, err := decode(authHeader[dataIndex+len(DataPrefix):]) | |||
if err != nil { | |||
return "", "", err | |||
} | |||
return sid, data, nil | |||
} |
@ -0,0 +1,19 @@ | |||
Copyright (c) 2013 Couchbase, Inc. | |||
Permission is hereby granted, free of charge, to any person obtaining a copy of | |||
this software and associated documentation files (the "Software"), to deal in | |||
the Software without restriction, including without limitation the rights to | |||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies | |||
of the Software, and to permit persons to whom the Software is furnished to do | |||
so, subject to the following conditions: | |||
The above copyright notice and this permission notice shall be included in all | |||
copies or substantial portions of the Software. | |||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |||
SOFTWARE. |
@ -0,0 +1,32 @@ | |||
package couchbase | |||
import () | |||
// Sample data: | |||
// {"disabled":["12333", "22244"],"uid":"132492431","auditdEnabled":true, | |||
// "disabledUsers":[{"name":"bill","domain":"local"},{"name":"bob","domain":"local"}], | |||
// "logPath":"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs", | |||
// "rotateInterval":86400,"rotateSize":20971520} | |||
type AuditSpec struct { | |||
Disabled []uint32 `json:"disabled"` | |||
Uid string `json:"uid"` | |||
AuditdEnabled bool `json:"auditdEnabled` | |||
DisabledUsers []AuditUser `json:"disabledUsers"` | |||
LogPath string `json:"logPath"` | |||
RotateInterval int64 `json:"rotateInterval"` | |||
RotateSize int64 `json:"rotateSize"` | |||
} | |||
type AuditUser struct { | |||
Name string `json:"name"` | |||
Domain string `json:"domain"` | |||
} | |||
func (c *Client) GetAuditSpec() (*AuditSpec, error) { | |||
ret := &AuditSpec{} | |||
err := c.parseURLResponse("/settings/audit", ret) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return ret, nil | |||
} |
@ -0,0 +1,387 @@ | |||
package couchbase | |||
import ( | |||
"errors" | |||
"sync/atomic" | |||
"time" | |||
"github.com/couchbase/gomemcached" | |||
"github.com/couchbase/gomemcached/client" | |||
"github.com/couchbase/goutils/logging" | |||
) | |||
// GenericMcdAuthHandler is a kind of AuthHandler that performs
// special auth exchange (like non-standard auth, possibly followed by
// select-bucket).
type GenericMcdAuthHandler interface {
	AuthHandler
	// AuthenticateMemcachedConn runs the full authentication exchange
	// on a freshly opened connection to host.
	AuthenticateMemcachedConn(host string, conn *memcached.Client) error
}
// Error raised when a connection can't be retrieved from a pool.
// NOTE(review): Go convention would name this ErrTimeout, but the name
// is exported and cannot change compatibly.
var TimeoutError = errors.New("timeout waiting to build connection")
// errClosedPool is returned once Close has shut the pool down.
var errClosedPool = errors.New("the connection pool is closed")
// errNoPool is returned when the receiver pool pointer is nil.
var errNoPool = errors.New("no connection pool")
// Default timeout for retrieving a connection from the pool.
var ConnPoolTimeout = time.Hour * 24 * 30
// overflow connection closer cycle time
var ConnCloserInterval = time.Second * 30
// ConnPoolAvailWaitTime is the amount of time to wait for an existing
// connection from the pool before considering the creation of a new
// one.
var ConnPoolAvailWaitTime = time.Millisecond
// connectionPool maintains a bounded set of memcached connections to a
// single host. createsem acts as a semaphore capping the total number
// of live connections (pool plus overflow).
type connectionPool struct {
	host        string
	mkConn      func(host string, ah AuthHandler) (*memcached.Client, error)
	auth        AuthHandler
	connections chan *memcached.Client // idle connections ready for reuse
	createsem   chan bool              // creation permits (pool + overflow)
	bailOut     chan bool              // signals the closer goroutine to exit
	poolSize    int
	connCount   uint64 // acquisition counter, sampled by connCloser
	inUse       bool
}
func newConnectionPool(host string, ah AuthHandler, closer bool, poolSize, poolOverflow int) *connectionPool { | |||
connSize := poolSize | |||
if closer { | |||
connSize += poolOverflow | |||
} | |||
rv := &connectionPool{ | |||
host: host, | |||
connections: make(chan *memcached.Client, connSize), | |||
createsem: make(chan bool, poolSize+poolOverflow), | |||
mkConn: defaultMkConn, | |||
auth: ah, | |||
poolSize: poolSize, | |||
} | |||
if closer { | |||
rv.bailOut = make(chan bool, 1) | |||
go rv.connCloser() | |||
} | |||
return rv | |||
} | |||
// ConnPoolCallback, when set, is invoked whenever a connection is
// acquired from a pool, with the acquisition path and start time.
// (The previous comment incorrectly referred to ConnPoolTimeout.)
var ConnPoolCallback func(host string, source string, start time.Time, err error)
func defaultMkConn(host string, ah AuthHandler) (*memcached.Client, error) { | |||
var features memcached.Features | |||
conn, err := memcached.Connect("tcp", host) | |||
if err != nil { | |||
return nil, err | |||
} | |||
if TCPKeepalive == true { | |||
conn.SetKeepAliveOptions(time.Duration(TCPKeepaliveInterval) * time.Second) | |||
} | |||
if EnableMutationToken == true { | |||
features = append(features, memcached.FeatureMutationToken) | |||
} | |||
if EnableDataType == true { | |||
features = append(features, memcached.FeatureDataType) | |||
} | |||
if EnableXattr == true { | |||
features = append(features, memcached.FeatureXattr) | |||
} | |||
if len(features) > 0 { | |||
if DefaultTimeout > 0 { | |||
conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout)) | |||
} | |||
res, err := conn.EnableFeatures(features) | |||
if DefaultTimeout > 0 { | |||
conn.SetDeadline(noDeadline) | |||
} | |||
if err != nil && isTimeoutError(err) { | |||
conn.Close() | |||
return nil, err | |||
} | |||
if err != nil || res.Status != gomemcached.SUCCESS { | |||
logging.Warnf("Unable to enable features %v", err) | |||
} | |||
} | |||
if gah, ok := ah.(GenericMcdAuthHandler); ok { | |||
err = gah.AuthenticateMemcachedConn(host, conn) | |||
if err != nil { | |||
conn.Close() | |||
return nil, err | |||
} | |||
return conn, nil | |||
} | |||
name, pass, bucket := ah.GetCredentials() | |||
if name != "default" { | |||
_, err = conn.Auth(name, pass) | |||
if err != nil { | |||
conn.Close() | |||
return nil, err | |||
} | |||
// Select bucket (Required for cb_auth creds) | |||
// Required when doing auth with _admin credentials | |||
if bucket != "" && bucket != name { | |||
_, err = conn.SelectBucket(bucket) | |||
if err != nil { | |||
conn.Close() | |||
return nil, err | |||
} | |||
} | |||
} | |||
return conn, nil | |||
} | |||
// Close shuts the pool down: it signals the closer goroutine (if one
// was started), closes the idle channel, and closes every parked
// connection. The deferred recover converts a panic (e.g. a double
// close of the channel) into an error return.
func (cp *connectionPool) Close() (err error) {
	defer func() {
		if recover() != nil {
			err = errors.New("connectionPool.Close error")
		}
	}()
	if cp.bailOut != nil {
		// defensively, we won't wait if the channel is full
		select {
		case cp.bailOut <- false:
		default:
		}
	}
	close(cp.connections)
	for c := range cp.connections {
		c.Close()
	}
	return
}
// Node returns the host this pool connects to.
func (cp *connectionPool) Node() string {
	return cp.host
}
// GetWithTimeout obtains a connection, waiting at most d. Acquisition
// proceeds in three stages: a non-blocking take from the idle channel,
// a short (ConnPoolAvailWaitTime) wait for one to be returned, and
// finally a race between a returned connection, permission to create a
// new one (createsem), and the overall timeout.
func (cp *connectionPool) GetWithTimeout(d time.Duration) (rv *memcached.Client, err error) {
	if cp == nil {
		return nil, errNoPool
	}
	// path records which acquisition route won, for the callback.
	path := ""
	if ConnPoolCallback != nil {
		defer func(path *string, start time.Time) {
			ConnPoolCallback(cp.host, *path, start, err)
		}(&path, time.Now())
	}
	path = "short-circuit"
	// short-circuit available connections.
	select {
	case rv, isopen := <-cp.connections:
		if !isopen {
			return nil, errClosedPool
		}
		atomic.AddUint64(&cp.connCount, 1)
		return rv, nil
	default:
	}
	t := time.NewTimer(ConnPoolAvailWaitTime)
	defer t.Stop()
	// Try to grab an available connection within 1ms
	select {
	case rv, isopen := <-cp.connections:
		path = "avail1"
		if !isopen {
			return nil, errClosedPool
		}
		atomic.AddUint64(&cp.connCount, 1)
		return rv, nil
	case <-t.C:
		// No connection came around in time, let's see
		// whether we can get one or build a new one first.
		t.Reset(d) // Reuse the timer for the full timeout.
		select {
		case rv, isopen := <-cp.connections:
			path = "avail2"
			if !isopen {
				return nil, errClosedPool
			}
			atomic.AddUint64(&cp.connCount, 1)
			return rv, nil
		case cp.createsem <- true:
			path = "create"
			// Build a connection if we can't get a real one.
			// This can potentially be an overflow connection, or
			// a pooled connection.
			rv, err := cp.mkConn(cp.host, cp.auth)
			if err != nil {
				// On error, release our create hold
				<-cp.createsem
			} else {
				atomic.AddUint64(&cp.connCount, 1)
			}
			return rv, err
		case <-t.C:
			return nil, ErrTimeout
		}
	}
}
// Get obtains a connection using the package-default pool timeout.
func (cp *connectionPool) Get() (*memcached.Client, error) {
	return cp.GetWithTimeout(ConnPoolTimeout)
}
func (cp *connectionPool) Return(c *memcached.Client) { | |||
if c == nil { | |||
return | |||
} | |||
if cp == nil { | |||
c.Close() | |||
} | |||
if c.IsHealthy() { | |||
defer func() { | |||
if recover() != nil { | |||
// This happens when the pool has already been | |||
// closed and we're trying to return a | |||
// connection to it anyway. Just close the | |||
// connection. | |||
c.Close() | |||
} | |||
}() | |||
select { | |||
case cp.connections <- c: | |||
default: | |||
<-cp.createsem | |||
c.Close() | |||
} | |||
} else { | |||
<-cp.createsem | |||
c.Close() | |||
} | |||
} | |||
// give the ability to discard a connection from a pool
// useful for ditching connections to the wrong node after a rebalance
func (cp *connectionPool) Discard(c *memcached.Client) {
	// Release the creation permit so a replacement can be built.
	<-cp.createsem
	c.Close()
}
// asynchronous connection closer
// connCloser runs as a goroutine (started by newConnectionPool) that
// periodically closes overflow connections (those above poolSize) once
// demand has subsided. It exits when bailOut is signalled.
func (cp *connectionPool) connCloser() {
	var connCount uint64
	t := time.NewTimer(ConnCloserInterval)
	defer t.Stop()
	for {
		// Sample the acquisition counter to measure demand per cycle.
		connCount = cp.connCount
		// we don't exist anymore! bail out!
		select {
		case <-cp.bailOut:
			return
		case <-t.C:
		}
		t.Reset(ConnCloserInterval)
		// no overflow connections open or sustained requests for connections
		// nothing to do until the next cycle
		if len(cp.connections) <= cp.poolSize ||
			ConnCloserInterval/ConnPoolAvailWaitTime < time.Duration(cp.connCount-connCount) {
			continue
		}
		// close overflow connections now that they are not needed
		for c := range cp.connections {
			select {
			case <-cp.bailOut:
				return
			default:
			}
			// bail out if close did not work out
			if !cp.connCleanup(c) {
				return
			}
			if len(cp.connections) <= cp.poolSize {
				break
			}
		}
	}
}
// close connection with recovery on error
// connCleanup closes c and releases its creation permit, reporting
// false when the pool has been torn down underneath us (the recover
// fires on the closed-channel panic).
func (cp *connectionPool) connCleanup(c *memcached.Client) (rv bool) {
	// just in case we are closing a connection after
	// bailOut has been sent but we haven't yet read it
	defer func() {
		if recover() != nil {
			rv = false
		}
	}()
	rv = true
	c.Close()
	<-cp.createsem
	return
}
// StartTapFeed takes a dedicated connection from the pool and starts a
// TAP feed on it; the connection's creation permit is released because
// it will never return to the pool.
func (cp *connectionPool) StartTapFeed(args *memcached.TapArguments) (*memcached.TapFeed, error) {
	if cp == nil {
		return nil, errNoPool
	}
	mc, err := cp.Get()
	if err != nil {
		return nil, err
	}
	// A connection can't be used after TAP; don't count it against the
	// connection pool capacity
	<-cp.createsem
	return mc.StartTapFeed(*args)
}
const DEFAULT_WINDOW_SIZE = 20 * 1024 * 1024 // 20 Mb
// StartUprFeed takes a dedicated connection from the pool and opens a
// UPR/DCP feed on it; the connection's creation permit is released
// because it will never return to the pool.
func (cp *connectionPool) StartUprFeed(name string, sequence uint32, dcp_buffer_size uint32, data_chan_size int) (*memcached.UprFeed, error) {
	if cp == nil {
		return nil, errNoPool
	}
	mc, err := cp.Get()
	if err != nil {
		return nil, err
	}
	// A connection can't be used after it has been allocated to UPR;
	// don't count it against the connection pool capacity
	<-cp.createsem
	// NOTE(review): on the error paths below mc is neither closed nor
	// returned — this looks like a connection leak; confirm.
	uf, err := mc.NewUprFeed()
	if err != nil {
		return nil, err
	}
	if err := uf.UprOpen(name, sequence, dcp_buffer_size); err != nil {
		return nil, err
	}
	if err := uf.StartFeedWithConfig(data_chan_size); err != nil {
		return nil, err
	}
	return uf, nil
}
@ -0,0 +1,288 @@ | |||
package couchbase | |||
import ( | |||
"bytes" | |||
"encoding/json" | |||
"fmt" | |||
"github.com/couchbase/goutils/logging" | |||
"io/ioutil" | |||
"net/http" | |||
) | |||
// ViewDefinition represents a single view within a design document.
type ViewDefinition struct {
	Map    string `json:"map"`              // map function source
	Reduce string `json:"reduce,omitempty"` // optional reduce function source
}
// DDoc is the document body of a design document specifying a view.
type DDoc struct {
	Language string                    `json:"language,omitempty"`
	Views    map[string]ViewDefinition `json:"views"`
}
// DDocsResult represents the result from listing the design
// documents.
type DDocsResult struct {
	Rows []struct {
		DDoc struct {
			Meta map[string]interface{}
			JSON DDoc
		} `json:"doc"`
	} `json:"rows"`
}
// GetDDocs lists all design documents | |||
func (b *Bucket) GetDDocs() (DDocsResult, error) { | |||
var ddocsResult DDocsResult | |||
b.RLock() | |||
pool := b.pool | |||
uri := b.DDocs.URI | |||
b.RUnlock() | |||
// MB-23555 ephemeral buckets have no ddocs | |||
if uri == "" { | |||
return DDocsResult{}, nil | |||
} | |||
err := pool.client.parseURLResponse(uri, &ddocsResult) | |||
if err != nil { | |||
return DDocsResult{}, err | |||
} | |||
return ddocsResult, nil | |||
} | |||
func (b *Bucket) GetDDocWithRetry(docname string, into interface{}) error { | |||
ddocURI := fmt.Sprintf("/%s/_design/%s", b.GetName(), docname) | |||
err := b.parseAPIResponse(ddocURI, &into) | |||
if err != nil { | |||
return err | |||
} | |||
return nil | |||
} | |||
func (b *Bucket) GetDDocsWithRetry() (DDocsResult, error) { | |||
var ddocsResult DDocsResult | |||
b.RLock() | |||
uri := b.DDocs.URI | |||
b.RUnlock() | |||
// MB-23555 ephemeral buckets have no ddocs | |||
if uri == "" { | |||
return DDocsResult{}, nil | |||
} | |||
err := b.parseURLResponse(uri, &ddocsResult) | |||
if err != nil { | |||
return DDocsResult{}, err | |||
} | |||
return ddocsResult, nil | |||
} | |||
func (b *Bucket) ddocURL(docname string) (string, error) { | |||
u, err := b.randomBaseURL() | |||
if err != nil { | |||
return "", err | |||
} | |||
u.Path = fmt.Sprintf("/%s/_design/%s", b.GetName(), docname) | |||
return u.String(), nil | |||
} | |||
func (b *Bucket) ddocURLNext(nodeId int, docname string) (string, int, error) { | |||
u, selected, err := b.randomNextURL(nodeId) | |||
if err != nil { | |||
return "", -1, err | |||
} | |||
u.Path = fmt.Sprintf("/%s/_design/%s", b.GetName(), docname) | |||
return u.String(), selected, nil | |||
} | |||
// Bounds applied to the node-count-derived retry budget used by the
// ddoc operations (see getMaxRetries).
const ABS_MAX_RETRIES = 10
const ABS_MIN_RETRIES = 3
func (b *Bucket) getMaxRetries() (int, error) { | |||
maxRetries := len(b.Nodes()) | |||
if maxRetries == 0 { | |||
return 0, fmt.Errorf("No available Couch rest URLs") | |||
} | |||
if maxRetries > ABS_MAX_RETRIES { | |||
maxRetries = ABS_MAX_RETRIES | |||
} else if maxRetries < ABS_MIN_RETRIES { | |||
maxRetries = ABS_MIN_RETRIES | |||
} | |||
return maxRetries, nil | |||
} | |||
// PutDDoc installs a design document.
//
// 'value' is JSON-marshalled and PUT to /<bucket>/_design/<docname>.
// On a non-201 response the bucket config is refreshed and the request
// retried against the next node, up to getMaxRetries() attempts;
// marshalling/auth/transport errors abort immediately. Returns the
// last non-201 error if every attempt fails, nil on success.
func (b *Bucket) PutDDoc(docname string, value interface{}) error {
	var Err error
	maxRetries, err := b.getMaxRetries()
	if err != nil {
		return err
	}
	lastNode := START_NODE_ID
	for retryCount := 0; retryCount < maxRetries; retryCount++ {
		// Reset the retryable error for this attempt.
		Err = nil
		// Round-robin to the node after the one used last attempt.
		ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname)
		if err != nil {
			return err
		}
		lastNode = selectedNode
		logging.Infof(" Trying with selected node %d", selectedNode)
		j, err := json.Marshal(value)
		if err != nil {
			return err
		}
		req, err := http.NewRequest("PUT", ddocU, bytes.NewReader(j))
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", "application/json")
		err = maybeAddAuth(req, b.authHandler(false /* bucket not yet locked */))
		if err != nil {
			return err
		}
		res, err := doHTTPRequest(req)
		if err != nil {
			return err
		}
		if res.StatusCode != 201 {
			body, _ := ioutil.ReadAll(res.Body)
			Err = fmt.Errorf("error installing view: %v / %s",
				res.Status, body)
			logging.Errorf(" Error in PutDDOC %v. Retrying...", Err)
			res.Body.Close()
			// Refresh config in case the topology changed.
			b.Refresh()
			continue
		}
		res.Body.Close()
		break
	}
	return Err
}
// GetDDoc retrieves a specific design doc and JSON-decodes it into
// 'into'. Like PutDDoc it retries across nodes on non-200 responses
// (refreshing the bucket config between attempts) and returns the
// last error if all attempts fail.
func (b *Bucket) GetDDoc(docname string, into interface{}) error {
	var Err error
	var res *http.Response
	maxRetries, err := b.getMaxRetries()
	if err != nil {
		return err
	}
	lastNode := START_NODE_ID
	for retryCount := 0; retryCount < maxRetries; retryCount++ {
		Err = nil
		ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname)
		if err != nil {
			return err
		}
		lastNode = selectedNode
		logging.Infof(" Trying with selected node %d", selectedNode)
		req, err := http.NewRequest("GET", ddocU, nil)
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", "application/json")
		err = maybeAddAuth(req, b.authHandler(false /* bucket not yet locked */))
		if err != nil {
			return err
		}
		res, err = doHTTPRequest(req)
		if err != nil {
			return err
		}
		if res.StatusCode != 200 {
			body, _ := ioutil.ReadAll(res.Body)
			Err = fmt.Errorf("error reading view: %v / %s",
				res.Status, body)
			logging.Errorf(" Error in GetDDOC %v Retrying...", Err)
			b.Refresh()
			res.Body.Close()
			continue
		}
		// Success: keep the body open for the decode below; this defer
		// fires when GetDDoc returns (the loop exits right after).
		defer res.Body.Close()
		break
	}
	if Err != nil {
		return Err
	}
	d := json.NewDecoder(res.Body)
	return d.Decode(into)
}
// DeleteDDoc removes a design document.
//
// Issues DELETE /<bucket>/_design/<docname>, retrying across nodes on
// non-200 responses (refreshing the bucket config between attempts).
// Returns the last non-200 error if every attempt fails, nil on
// success; auth/transport errors abort immediately.
func (b *Bucket) DeleteDDoc(docname string) error {
	var Err error
	maxRetries, err := b.getMaxRetries()
	if err != nil {
		return err
	}
	lastNode := START_NODE_ID
	for retryCount := 0; retryCount < maxRetries; retryCount++ {
		Err = nil
		ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname)
		if err != nil {
			return err
		}
		lastNode = selectedNode
		logging.Infof(" Trying with selected node %d", selectedNode)
		req, err := http.NewRequest("DELETE", ddocU, nil)
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", "application/json")
		err = maybeAddAuth(req, b.authHandler(false /* bucket not already locked */))
		if err != nil {
			return err
		}
		res, err := doHTTPRequest(req)
		if err != nil {
			return err
		}
		if res.StatusCode != 200 {
			body, _ := ioutil.ReadAll(res.Body)
			Err = fmt.Errorf("error deleting view : %v / %s", res.Status, body)
			logging.Errorf(" Error in DeleteDDOC %v. Retrying ... ", Err)
			b.Refresh()
			res.Body.Close()
			continue
		}
		res.Body.Close()
		break
	}
	return Err
}
@ -0,0 +1,300 @@ | |||
package couchbase | |||
import ( | |||
"fmt" | |||
"github.com/couchbase/goutils/logging" | |||
"sync" | |||
) | |||
// PersistTo encodes the number of nodes a mutation must be persisted
// on before ObserveAndPersistPoll considers it durable.
type PersistTo uint8
const (
	PersistNone = PersistTo(0x00)
	PersistMaster = PersistTo(0x01)
	PersistOne = PersistTo(0x02)
	PersistTwo = PersistTo(0x03)
	PersistThree = PersistTo(0x04)
	PersistFour = PersistTo(0x05)
)
// ObserveTo encodes the number of replicas a mutation must be
// observed on.
type ObserveTo uint8
const (
	ObserveNone = ObserveTo(0x00)
	ObserveReplicateOne = ObserveTo(0x01)
	ObserveReplicateTwo = ObserveTo(0x02)
	ObserveReplicateThree = ObserveTo(0x03)
	ObserveReplicateFour = ObserveTo(0x04)
)
// JobType distinguishes observe jobs from persist jobs in the poller.
type JobType uint8
const (
	OBSERVE = JobType(0x00)
	PERSIST = JobType(0x01)
)
// ObservePersistJob is one unit of work for the observe/persist
// pollers: check the state of vbucket 'vb' on 'hostname' and report
// back on resultChan (or errorChan on failure).
type ObservePersistJob struct {
	vb uint16
	vbuuid uint64
	hostname string
	jobType JobType // OBSERVE or PERSIST
	failover uint8 // non-zero if the node reported a failover
	lastPersistedSeqNo uint64
	currentSeqNo uint64
	resultChan chan *ObservePersistJob
	errorChan chan *OPErrResponse
}
// OPErrResponse reports a failed observe/persist job back to the
// waiter, carrying the original job so it can be pooled again.
type OPErrResponse struct {
	vb uint16
	vbuuid uint64
	err error
	job *ObservePersistJob
}
// Shared machinery for observe/persist polling: a free list of job
// structs, the global job queue, a shutdown signal for pollers, and a
// WaitGroup tracking live pollers (see StartOPPollers / OPJobPoll).
var ObservePersistPool = NewPool(1024)
var OPJobChan = make(chan *ObservePersistJob, 1024)
var OPJobDone = make(chan bool)
var wg sync.WaitGroup
func (b *Bucket) StartOPPollers(maxWorkers int) { | |||
for i := 0; i < maxWorkers; i++ { | |||
go b.OPJobPoll() | |||
wg.Add(1) | |||
} | |||
wg.Wait() | |||
} | |||
func (b *Bucket) SetObserveAndPersist(nPersist PersistTo, nObserve ObserveTo) (err error) { | |||
numNodes := len(b.Nodes()) | |||
if int(nPersist) > numNodes || int(nObserve) > numNodes { | |||
return fmt.Errorf("Not enough healthy nodes in the cluster") | |||
} | |||
if int(nPersist) > (b.Replicas+1) || int(nObserve) > b.Replicas { | |||
return fmt.Errorf("Not enough replicas in the cluster") | |||
} | |||
if EnableMutationToken == false { | |||
return fmt.Errorf("Mutation Tokens not enabled ") | |||
} | |||
b.ds = &DurablitySettings{Persist: PersistTo(nPersist), Observe: ObserveTo(nObserve)} | |||
return | |||
} | |||
// ObserveAndPersistPoll blocks until the mutation identified by
// (vb, vbuuid, seqNo) satisfies the bucket's DurablitySettings: it
// enqueues one observe and/or persist job per target node onto the
// global OPJobChan and requeues jobs until each node has caught up to
// seqNo. Returns nil immediately when no durability settings are set.
// 'failover' is reported true if any node signalled a failover
// (failover recovery itself is still a TODO below).
func (b *Bucket) ObserveAndPersistPoll(vb uint16, vbuuid uint64, seqNo uint64) (err error, failover bool) {
	b.RLock()
	ds := b.ds
	b.RUnlock()
	if ds == nil {
		return
	}
	nj := 0 // total number of jobs
	resultChan := make(chan *ObservePersistJob, 10)
	errChan := make(chan *OPErrResponse, 10)
	nodes := b.GetNodeList(vb)
	if int(ds.Observe) > len(nodes) || int(ds.Persist) > len(nodes) {
		return fmt.Errorf("Not enough healthy nodes in the cluster"), false
	}
	logging.Infof("Node list %v", nodes)
	if ds.Observe >= ObserveReplicateOne {
		// create a job for each host
		for i := ObserveReplicateOne; i < ds.Observe+1; i++ {
			opJob := ObservePersistPool.Get()
			opJob.vb = vb
			opJob.vbuuid = vbuuid
			opJob.jobType = OBSERVE
			opJob.hostname = nodes[i]
			opJob.resultChan = resultChan
			opJob.errorChan = errChan
			OPJobChan <- opJob
			nj++
		}
	}
	if ds.Persist >= PersistMaster {
		// one persist job per target node
		for i := PersistMaster; i < ds.Persist+1; i++ {
			opJob := ObservePersistPool.Get()
			opJob.vb = vb
			opJob.vbuuid = vbuuid
			opJob.jobType = PERSIST
			opJob.hostname = nodes[i]
			opJob.resultChan = resultChan
			opJob.errorChan = errChan
			OPJobChan <- opJob
			nj++
		}
	}
	// Collect results, requeueing jobs that have not yet reached seqNo,
	// until every outstanding job is accounted for (nj reaches 0).
	ok := true
	for ok {
		select {
		case res := <-resultChan:
			jobDone := false
			if res.failover == 0 {
				// no failover
				if res.jobType == PERSIST {
					if res.lastPersistedSeqNo >= seqNo {
						jobDone = true
					}
				} else {
					if res.currentSeqNo >= seqNo {
						jobDone = true
					}
				}
				if jobDone == true {
					nj--
					ObservePersistPool.Put(res)
				} else {
					// requeue this job
					OPJobChan <- res
				}
			} else {
				// Not currently handling failover scenarios TODO
				nj--
				ObservePersistPool.Put(res)
				failover = true
			}
			if nj == 0 {
				// done with all the jobs
				ok = false
				close(resultChan)
				close(errChan)
			}
		case Err := <-errChan:
			logging.Errorf("Error in Observe/Persist %v", Err.err)
			err = fmt.Errorf("Error in Observe/Persist job %v", Err.err)
			nj--
			ObservePersistPool.Put(Err.job)
			if nj == 0 {
				close(resultChan)
				close(errChan)
				ok = false
			}
		}
	}
	return
}
// OPJobPoll is the poller goroutine body: it services jobs from
// OPJobChan — one ObserveSeq call per job, on a connection borrowed
// from the per-host pool — until OPJobDone is signalled, then marks
// the shared WaitGroup done.
func (b *Bucket) OPJobPoll() {
	ok := true
	for ok == true {
		select {
		case job := <-OPJobChan:
			pool := b.getConnPoolByHost(job.hostname, false /* bucket not already locked */)
			if pool == nil {
				errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid}
				errRes.err = fmt.Errorf("Pool not found for host %v", job.hostname)
				errRes.job = job
				job.errorChan <- errRes
				continue
			}
			conn, err := pool.Get()
			if err != nil {
				errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid}
				errRes.err = fmt.Errorf("Unable to get connection from pool %v", err)
				errRes.job = job
				job.errorChan <- errRes
				continue
			}
			res, err := conn.ObserveSeq(job.vb, job.vbuuid)
			if err != nil {
				// NOTE(review): on this path 'conn' is neither returned
				// to the pool nor closed — looks like a connection
				// leak; confirm and fix.
				errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid}
				errRes.err = fmt.Errorf("Command failed %v", err)
				errRes.job = job
				job.errorChan <- errRes
				continue
			}
			pool.Return(conn)
			job.lastPersistedSeqNo = res.LastPersistedSeqNo
			job.currentSeqNo = res.CurrentSeqNo
			job.failover = res.Failover
			job.resultChan <- job
		case <-OPJobDone:
			logging.Infof("Observe Persist Poller exitting")
			ok = false
		}
	}
	wg.Done()
}
func (b *Bucket) GetNodeList(vb uint16) []string { | |||
vbm := b.VBServerMap() | |||
if len(vbm.VBucketMap) < int(vb) { | |||
logging.Infof("vbmap smaller than vblist") | |||
return nil | |||
} | |||
nodes := make([]string, len(vbm.VBucketMap[vb])) | |||
for i := 0; i < len(vbm.VBucketMap[vb]); i++ { | |||
n := vbm.VBucketMap[vb][i] | |||
if n < 0 { | |||
continue | |||
} | |||
node := b.getMasterNode(n) | |||
if len(node) > 1 { | |||
nodes[i] = node | |||
} | |||
continue | |||
} | |||
return nodes | |||
} | |||
// pool of ObservePersist Jobs
//
// OPpool is a non-blocking free list of ObservePersistJob structs
// backed by a buffered channel (see Get/Put).
type OPpool struct {
	pool chan *ObservePersistJob
}
// NewPool creates a new pool of jobs
// with capacity 'max'; when empty, Get allocates fresh jobs instead
// of blocking.
func NewPool(max int) *OPpool {
	return &OPpool{
		pool: make(chan *ObservePersistJob, max),
	}
}
// Borrow a Client from the pool. | |||
func (p *OPpool) Get() *ObservePersistJob { | |||
var o *ObservePersistJob | |||
select { | |||
case o = <-p.pool: | |||
default: | |||
o = &ObservePersistJob{} | |||
} | |||
return o | |||
} | |||
// Return returns a Client to the pool. | |||
func (p *OPpool) Put(o *ObservePersistJob) { | |||
select { | |||
case p.pool <- o: | |||
default: | |||
// let it go, let it go... | |||
} | |||
} |
@ -0,0 +1,209 @@ | |||
package couchbase | |||
import ( | |||
"encoding/json" | |||
"fmt" | |||
"github.com/couchbase/goutils/logging" | |||
"io" | |||
"io/ioutil" | |||
"math/rand" | |||
"net" | |||
"net/http" | |||
"time" | |||
"unsafe" | |||
) | |||
// Bucket auto-updater gets the latest version of the bucket config from | |||
// the server. If the configuration has changed then updated the local | |||
// bucket information. If the bucket has been deleted then notify anyone | |||
// who is holding a reference to this bucket | |||
// MAX_RETRY_COUNT bounds consecutive failures in UpdateBucket before
// the updater gives up.
const MAX_RETRY_COUNT = 5
// DISCONNECT_PERIOD is a disconnect window for the updater.
// NOTE(review): not referenced in this file — confirm its users.
const DISCONNECT_PERIOD = 120 * time.Second
// NotifyFn is invoked by RunBucketUpdater when the updater exits with
// an error, receiving the bucket name and the error.
type NotifyFn func(bucket string, err error)
// Use TCP keepalive to detect half close sockets
// (the streaming config connection can sit idle for long periods).
var updaterTransport http.RoundTripper = &http.Transport{
	Proxy: http.ProxyFromEnvironment,
	Dial: (&net.Dialer{
		Timeout: 30 * time.Second,
		KeepAlive: 30 * time.Second,
	}).Dial,
}
// updaterHTTPClient is the dedicated client for the streaming updater.
var updaterHTTPClient = &http.Client{Transport: updaterTransport}
func doHTTPRequestForUpdate(req *http.Request) (*http.Response, error) { | |||
var err error | |||
var res *http.Response | |||
for i := 0; i < HTTP_MAX_RETRY; i++ { | |||
res, err = updaterHTTPClient.Do(req) | |||
if err != nil && isHttpConnError(err) { | |||
continue | |||
} | |||
break | |||
} | |||
if err != nil { | |||
return nil, err | |||
} | |||
return res, err | |||
} | |||
// RunBucketUpdater starts the streaming bucket updater in a background
// goroutine. If the updater exits with an error, notify (when non-nil)
// is invoked with the bucket name and the error before it is logged.
func (b *Bucket) RunBucketUpdater(notify NotifyFn) {
	go func() {
		err := b.UpdateBucket()
		if err != nil {
			if notify != nil {
				notify(b.GetName(), err)
			}
			logging.Errorf(" Bucket Updater exited with err %v", err)
		}
	}()
}
func (b *Bucket) replaceConnPools2(with []*connectionPool, bucketLocked bool) { | |||
if !bucketLocked { | |||
b.Lock() | |||
defer b.Unlock() | |||
} | |||
old := b.connPools | |||
b.connPools = unsafe.Pointer(&with) | |||
if old != nil { | |||
for _, pool := range *(*[]*connectionPool)(old) { | |||
if pool != nil && pool.inUse == false { | |||
pool.Close() | |||
} | |||
} | |||
} | |||
return | |||
} | |||
// UpdateBucket is the long-running body of the bucket updater. It
// opens the streaming configuration endpoint
// (/pools/default/bucketsStreaming/<bucket>) on a randomly chosen node
// and applies each configuration chunk it receives: connection pools
// are swapped (reusing unchanged ones), and the vbucket server map,
// node list and scopes are replaced. On stream errors it reconnects to
// another random node, giving up after MAX_RETRY_COUNT consecutive
// failures.
func (b *Bucket) UpdateBucket() error {
	var failures int
	var returnErr error
	for {
		if failures == MAX_RETRY_COUNT {
			logging.Errorf(" Maximum failures reached. Exiting loop...")
			return fmt.Errorf("Max failures reached. Last Error %v", returnErr)
		}
		nodes := b.Nodes()
		if len(nodes) < 1 {
			return fmt.Errorf("No healthy nodes found")
		}
		// Pick a random node for this streaming connection attempt.
		startNode := rand.Intn(len(nodes))
		node := nodes[(startNode)%len(nodes)]
		streamUrl := fmt.Sprintf("http://%s/pools/default/bucketsStreaming/%s", node.Hostname, b.GetName())
		logging.Infof(" Trying with %s", streamUrl)
		req, err := http.NewRequest("GET", streamUrl, nil)
		if err != nil {
			return err
		}
		b.RLock()
		pool := b.pool
		bucketName := b.Name
		b.RUnlock()
		scopes, err := getScopesAndCollections(pool, bucketName)
		if err != nil {
			return err
		}
		// Lock here to avoid having pool closed under us.
		b.RLock()
		err = maybeAddAuth(req, b.pool.client.ah)
		b.RUnlock()
		if err != nil {
			return err
		}
		res, err := doHTTPRequestForUpdate(req)
		if err != nil {
			return err
		}
		if res.StatusCode != 200 {
			bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
			logging.Errorf("Failed to connect to host, unexpected status code: %v. Body %s", res.StatusCode, bod)
			res.Body.Close()
			returnErr = fmt.Errorf("Failed to connect to host. Status %v Body %s", res.StatusCode, bod)
			failures++
			continue
		}
		dec := json.NewDecoder(res.Body)
		tmpb := &Bucket{}
		// Inner loop: one iteration per streamed configuration chunk;
		// exits (with returnErr set) on decode error / dropped stream.
		for {
			err := dec.Decode(&tmpb)
			if err != nil {
				returnErr = err
				res.Body.Close()
				break
			}
			// if we got here, reset failure count
			failures = 0
			b.Lock()
			// mark all the old connection pools for deletion
			pools := b.getConnPools(true /* already locked */)
			for _, pool := range pools {
				if pool != nil {
					pool.inUse = false
				}
			}
			newcps := make([]*connectionPool, len(tmpb.VBSMJson.ServerList))
			for i := range newcps {
				// get the old connection pool and check if it is still valid
				pool := b.getConnPoolByHost(tmpb.VBSMJson.ServerList[i], true /* bucket already locked */)
				if pool != nil && pool.inUse == false {
					// if the hostname and index is unchanged then reuse this pool
					newcps[i] = pool
					pool.inUse = true
					continue
				}
				// else create a new pool
				if b.ah != nil {
					newcps[i] = newConnectionPool(
						tmpb.VBSMJson.ServerList[i],
						b.ah, false, PoolSize, PoolOverflow)
				} else {
					newcps[i] = newConnectionPool(
						tmpb.VBSMJson.ServerList[i],
						b.authHandler(true /* bucket already locked */),
						false, PoolSize, PoolOverflow)
				}
			}
			b.replaceConnPools2(newcps, true /* bucket already locked */)
			tmpb.ah = b.ah
			b.vBucketServerMap = unsafe.Pointer(&tmpb.VBSMJson)
			b.nodeList = unsafe.Pointer(&tmpb.NodesJSON)
			b.Scopes = scopes
			b.Unlock()
			logging.Infof("Got new configuration for bucket %s", b.GetName())
		}
		// we are here because of an error
		failures++
		continue
	}
	return nil
}
@ -0,0 +1,143 @@ | |||
package couchbase | |||
import ( | |||
"github.com/couchbase/gomemcached/client" | |||
"github.com/couchbase/goutils/logging" | |||
"sync" | |||
"time" | |||
) | |||
// Reconnect backoff bounds for the feed run loops: the interval
// doubles on each failure from initialRetryInterval up to
// maximumRetryInterval.
const initialRetryInterval = 1 * time.Second
const maximumRetryInterval = 30 * time.Second
// A TapFeed streams mutation events from a bucket. | |||
// | |||
// Events from the bucket can be read from the channel 'C'. Remember | |||
// to call Close() on it when you're done, unless its channel has | |||
// closed itself already. | |||
type TapFeed struct {
	C <-chan memcached.TapEvent // read side of the aggregated event stream
	bucket *Bucket // bucket this feed streams from
	args *memcached.TapArguments // TAP options used for every node connection
	nodeFeeds []*memcached.TapFeed // The TAP feeds of the individual nodes
	output chan memcached.TapEvent // Same as C but writeably-typed
	wg sync.WaitGroup // tracks forwardTapEvents goroutines
	quit chan bool // closed by Close() to stop run/forwarders
}
// StartTapFeed creates and starts a new Tap feed | |||
func (b *Bucket) StartTapFeed(args *memcached.TapArguments) (*TapFeed, error) { | |||
if args == nil { | |||
defaultArgs := memcached.DefaultTapArguments() | |||
args = &defaultArgs | |||
} | |||
feed := &TapFeed{ | |||
bucket: b, | |||
args: args, | |||
output: make(chan memcached.TapEvent, 10), | |||
quit: make(chan bool), | |||
} | |||
go feed.run() | |||
feed.C = feed.output | |||
return feed, nil | |||
} | |||
// Goroutine that runs the feed
//
// Connects to the TAP feed of every node and blocks until one of the
// per-node feeds dies (killSwitch) or the feed is closed (quit). On
// failure the bucket config is refreshed and the connections retried
// with exponential backoff capped at maximumRetryInterval.
func (feed *TapFeed) run() {
	retryInterval := initialRetryInterval
	bucketOK := true
	for {
		// Connect to the TAP feed of each server node:
		if bucketOK {
			killSwitch, err := feed.connectToNodes()
			if err == nil {
				// Run until one of the sub-feeds fails:
				select {
				case <-killSwitch:
				case <-feed.quit:
					return
				}
				feed.closeNodeFeeds()
				retryInterval = initialRetryInterval
			}
		}
		// On error, try to refresh the bucket in case the list of nodes changed:
		logging.Infof("go-couchbase: TAP connection lost; reconnecting to bucket %q in %v",
			feed.bucket.Name, retryInterval)
		err := feed.bucket.Refresh()
		bucketOK = err == nil
		// Back off before the next attempt, unless we are shutting down.
		select {
		case <-time.After(retryInterval):
		case <-feed.quit:
			return
		}
		if retryInterval *= 2; retryInterval > maximumRetryInterval {
			retryInterval = maximumRetryInterval
		}
	}
}
func (feed *TapFeed) connectToNodes() (killSwitch chan bool, err error) { | |||
killSwitch = make(chan bool) | |||
for _, serverConn := range feed.bucket.getConnPools(false /* not already locked */) { | |||
var singleFeed *memcached.TapFeed | |||
singleFeed, err = serverConn.StartTapFeed(feed.args) | |||
if err != nil { | |||
logging.Errorf("go-couchbase: Error connecting to tap feed of %s: %v", serverConn.host, err) | |||
feed.closeNodeFeeds() | |||
return | |||
} | |||
feed.nodeFeeds = append(feed.nodeFeeds, singleFeed) | |||
go feed.forwardTapEvents(singleFeed, killSwitch, serverConn.host) | |||
feed.wg.Add(1) | |||
} | |||
return | |||
} | |||
// Goroutine that forwards Tap events from a single node's feed to the aggregate feed.
// Exits when the node feed's channel closes (signalling killSwitch so
// run() can reconnect) or when the aggregate feed is closed.
func (feed *TapFeed) forwardTapEvents(singleFeed *memcached.TapFeed, killSwitch chan bool, host string) {
	defer feed.wg.Done()
	for {
		select {
		case event, ok := <-singleFeed.C:
			if !ok {
				// Node feed closed; report the underlying error, if any.
				if singleFeed.Error != nil {
					logging.Errorf("go-couchbase: Tap feed from %s failed: %v", host, singleFeed.Error)
				}
				killSwitch <- true
				return
			}
			feed.output <- event
		case <-feed.quit:
			return
		}
	}
}
func (feed *TapFeed) closeNodeFeeds() { | |||
for _, f := range feed.nodeFeeds { | |||
f.Close() | |||
} | |||
feed.nodeFeeds = nil | |||
} | |||
// Close a Tap feed.
//
// Idempotent: a second call returns nil immediately (quit already
// closed). Shuts down the per-node feeds, signals run() and the
// forwarders to stop, waits for them, then closes the output channel.
func (feed *TapFeed) Close() error {
	select {
	case <-feed.quit:
		return nil
	default:
	}
	feed.closeNodeFeeds()
	close(feed.quit)
	feed.wg.Wait()
	close(feed.output)
	return nil
}
@ -0,0 +1,398 @@ | |||
package couchbase | |||
import ( | |||
"log" | |||
"sync" | |||
"time" | |||
"fmt" | |||
"github.com/couchbase/gomemcached" | |||
"github.com/couchbase/gomemcached/client" | |||
"github.com/couchbase/goutils/logging" | |||
) | |||
// A UprFeed streams mutation events from a bucket. | |||
// | |||
// Events from the bucket can be read from the channel 'C'. Remember | |||
// to call Close() on it when you're done, unless its channel has | |||
// closed itself already. | |||
type UprFeed struct {
	C <-chan *memcached.UprEvent // read side of the aggregated event stream
	bucket *Bucket // bucket this feed streams from
	nodeFeeds map[string]*FeedInfo // The UPR feeds of the individual nodes
	output chan *memcached.UprEvent // Same as C but writeably-typed
	outputClosed bool // true once 'output' has been closed
	quit chan bool // closed by Close() to stop run()
	name string // name of this UPR feed
	sequence uint32 // sequence number for this feed
	connected bool // set once the initial node connections succeed
	killSwitch chan bool // signalled when a per-node feed dies
	closing bool // set by Close() so forwarders exit quietly
	wg sync.WaitGroup // tracks forwardUprEvents goroutines
	dcp_buffer_size uint32 // DCP flow-control buffer per connection
	data_chan_size int // capacity of 'output'
}
// UprFeed from a single connection
//
// FeedInfo tracks one per-node UPR connection and its forwarder.
type FeedInfo struct {
	uprFeed *memcached.UprFeed // UPR feed handle
	host string // hostname
	connected bool // connected
	quit chan bool // quit channel
}
// FailoverLog maps a vbucket id to its failover log.
type FailoverLog map[uint16]memcached.FailoverLog
// GetFailoverLogs gets the failover logs for a set of vbucket ids.
// It groups the requested vbuckets by their master node, fetches each
// group's logs over a temporary connection to that node, and merges
// the results into one map.
func (b *Bucket) GetFailoverLogs(vBuckets []uint16) (FailoverLog, error) {
	// map vbids to their corresponding hosts
	vbHostList := make(map[string][]uint16)
	vbm := b.VBServerMap()
	if len(vbm.VBucketMap) < len(vBuckets) {
		return nil, fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
			vbm.VBucketMap, vBuckets)
	}
	for _, vb := range vBuckets {
		masterID := vbm.VBucketMap[vb][0]
		master := b.getMasterNode(masterID)
		if master == "" {
			return nil, fmt.Errorf("No master found for vb %d", vb)
		}
		vbList := vbHostList[master]
		if vbList == nil {
			vbList = make([]uint16, 0)
		}
		vbList = append(vbList, vb)
		vbHostList[master] = vbList
	}
	failoverLogMap := make(FailoverLog)
	for _, serverConn := range b.getConnPools(false /* not already locked */) {
		vbList := vbHostList[serverConn.host]
		if vbList == nil {
			continue
		}
		mc, err := serverConn.Get()
		if err != nil {
			logging.Infof("No Free connections for vblist %v", vbList)
			return nil, fmt.Errorf("No Free connections for host %s",
				serverConn.host)
		}
		// close the connection so that it doesn't get reused for upr data
		// connection
		// (note: this defer runs at function return, so one connection
		// per host stays open until GetFailoverLogs finishes)
		defer mc.Close()
		failoverlogs, err := mc.UprGetFailoverLog(vbList)
		if err != nil {
			return nil, fmt.Errorf("Error getting failover log %s host %s",
				err.Error(), serverConn.host)
		}
		for vb, log := range failoverlogs {
			failoverLogMap[vb] = *log
		}
	}
	return failoverLogMap, nil
}
// StartUprFeed starts a UPR feed with default settings: a data
// channel of 10 events and a DEFAULT_WINDOW_SIZE DCP buffer.
func (b *Bucket) StartUprFeed(name string, sequence uint32) (*UprFeed, error) {
	return b.StartUprFeedWithConfig(name, sequence, 10, DEFAULT_WINDOW_SIZE)
}
// StartUprFeed creates and starts a new Upr feed
// No data will be sent on the channel unless vbuckets streams are requested
//
// data_chan_size sizes the aggregate output channel; dcp_buffer_size
// is the DCP flow-control buffer used for each node connection.
func (b *Bucket) StartUprFeedWithConfig(name string, sequence uint32, data_chan_size int, dcp_buffer_size uint32) (*UprFeed, error) {
	feed := &UprFeed{
		bucket: b,
		output: make(chan *memcached.UprEvent, data_chan_size),
		quit: make(chan bool),
		nodeFeeds: make(map[string]*FeedInfo, 0),
		name: name,
		sequence: sequence,
		killSwitch: make(chan bool),
		dcp_buffer_size: dcp_buffer_size,
		data_chan_size: data_chan_size,
	}
	err := feed.connectToNodes()
	if err != nil {
		return nil, fmt.Errorf("Cannot connect to bucket %s", err.Error())
	}
	feed.connected = true
	go feed.run()
	feed.C = feed.output
	return feed, nil
}
// UprRequestStream starts a stream for a vb on a feed
// by forwarding the request to the UPR feed of the vbucket's master
// node. The deferred recover re-panics with feed context attached.
func (feed *UprFeed) UprRequestStream(vb uint16, opaque uint16, flags uint32,
	vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error {
	defer func() {
		if r := recover(); r != nil {
			log.Panicf("Panic in UprRequestStream. Feed %v Bucket %v", feed, feed.bucket)
		}
	}()
	vbm := feed.bucket.VBServerMap()
	// NOTE(review): this check looks off by one (arguably should be
	// <=), but the stricter bounds check just below makes it harmless.
	if len(vbm.VBucketMap) < int(vb) {
		return fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
			vb, vbm.VBucketMap)
	}
	if int(vb) >= len(vbm.VBucketMap) {
		return fmt.Errorf("Invalid vbucket id %d", vb)
	}
	masterID := vbm.VBucketMap[vb][0]
	master := feed.bucket.getMasterNode(masterID)
	if master == "" {
		return fmt.Errorf("Master node not found for vbucket %d", vb)
	}
	singleFeed := feed.nodeFeeds[master]
	if singleFeed == nil {
		return fmt.Errorf("UprFeed for this host not found")
	}
	if err := singleFeed.uprFeed.UprRequestStream(vb, opaque, flags,
		vuuid, startSequence, endSequence, snapStart, snapEnd); err != nil {
		return err
	}
	return nil
}
// UprCloseStream ends a vbucket stream.
// The close request is routed to the UPR feed of the vbucket's master
// node; the deferred recover re-panics with feed context attached.
func (feed *UprFeed) UprCloseStream(vb, opaqueMSB uint16) error {
	defer func() {
		if r := recover(); r != nil {
			log.Panicf("Panic in UprCloseStream. Feed %v Bucket %v ", feed, feed.bucket)
		}
	}()
	vbm := feed.bucket.VBServerMap()
	// NOTE(review): redundant with the stricter check below (see
	// UprRequestStream), kept for identical error messages.
	if len(vbm.VBucketMap) < int(vb) {
		return fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
			vb, vbm.VBucketMap)
	}
	if int(vb) >= len(vbm.VBucketMap) {
		return fmt.Errorf("Invalid vbucket id %d", vb)
	}
	masterID := vbm.VBucketMap[vb][0]
	master := feed.bucket.getMasterNode(masterID)
	if master == "" {
		return fmt.Errorf("Master node not found for vbucket %d", vb)
	}
	singleFeed := feed.nodeFeeds[master]
	if singleFeed == nil {
		return fmt.Errorf("UprFeed for this host not found")
	}
	if err := singleFeed.uprFeed.CloseStream(vb, opaqueMSB); err != nil {
		return err
	}
	return nil
}
// Goroutine that runs the feed
//
// Waits for a per-node feed failure (killSwitch) or shutdown (quit),
// then refreshes the bucket config and reconnects to any node without
// a live feed, backing off exponentially up to maximumRetryInterval.
// A failed Refresh or reconnect closes the output channel and ends
// the feed (MB-14917).
func (feed *UprFeed) run() {
	retryInterval := initialRetryInterval
	bucketOK := true
	for {
		// Connect to the UPR feed of each server node:
		if bucketOK {
			// Run until one of the sub-feeds fails:
			select {
			case <-feed.killSwitch:
			case <-feed.quit:
				return
			}
			//feed.closeNodeFeeds()
			retryInterval = initialRetryInterval
		}
		if feed.closing == true {
			// we have been asked to shut down
			return
		}
		// On error, try to refresh the bucket in case the list of nodes changed:
		logging.Infof("go-couchbase: UPR connection lost; reconnecting to bucket %q in %v",
			feed.bucket.Name, retryInterval)
		if err := feed.bucket.Refresh(); err != nil {
			// if we fail to refresh the bucket, exit the feed
			// MB-14917
			logging.Infof("Unable to refresh bucket %s ", err.Error())
			close(feed.output)
			feed.outputClosed = true
			feed.closeNodeFeeds()
			return
		}
		// this will only connect to nodes that are not connected or changed
		// user will have to reconnect the stream
		err := feed.connectToNodes()
		if err != nil {
			logging.Infof("Unable to connect to nodes..exit ")
			close(feed.output)
			feed.outputClosed = true
			feed.closeNodeFeeds()
			return
		}
		// NOTE(review): err is always nil here (the error case returned
		// above), so bucketOK is always true at this point.
		bucketOK = err == nil
		select {
		case <-time.After(retryInterval):
		case <-feed.quit:
			return
		}
		if retryInterval *= 2; retryInterval > maximumRetryInterval {
			retryInterval = maximumRetryInterval
		}
	}
}
func (feed *UprFeed) connectToNodes() (err error) { | |||
nodeCount := 0 | |||
for _, serverConn := range feed.bucket.getConnPools(false /* not already locked */) { | |||
// this maybe a reconnection, so check if the connection to the node | |||
// already exists. Connect only if the node is not found in the list | |||
// or connected == false | |||
nodeFeed := feed.nodeFeeds[serverConn.host] | |||
if nodeFeed != nil && nodeFeed.connected == true { | |||
continue | |||
} | |||
var singleFeed *memcached.UprFeed | |||
var name string | |||
if feed.name == "" { | |||
name = "DefaultUprClient" | |||
} else { | |||
name = feed.name | |||
} | |||
singleFeed, err = serverConn.StartUprFeed(name, feed.sequence, feed.dcp_buffer_size, feed.data_chan_size) | |||
if err != nil { | |||
logging.Errorf("go-couchbase: Error connecting to upr feed of %s: %v", serverConn.host, err) | |||
feed.closeNodeFeeds() | |||
return | |||
} | |||
// add the node to the connection map | |||
feedInfo := &FeedInfo{ | |||
uprFeed: singleFeed, | |||
connected: true, | |||
host: serverConn.host, | |||
quit: make(chan bool), | |||
} | |||
feed.nodeFeeds[serverConn.host] = feedInfo | |||
go feed.forwardUprEvents(feedInfo, feed.killSwitch, serverConn.host) | |||
feed.wg.Add(1) | |||
nodeCount++ | |||
} | |||
if nodeCount == 0 { | |||
return fmt.Errorf("No connection to bucket") | |||
} | |||
return nil | |||
} | |||
// Goroutine that forwards Upr events from a single node's feed to the aggregate feed.
// Exits when the node's quit channel fires, when the node feed's
// channel closes (signalling killSwitch so run() can reconnect), or
// when the aggregate output has been closed. NOT_MY_VBUCKET events
// trigger an in-place bucket refresh and reconnect.
func (feed *UprFeed) forwardUprEvents(nodeFeed *FeedInfo, killSwitch chan bool, host string) {
	singleFeed := nodeFeed.uprFeed
	defer func() {
		feed.wg.Done()
		if r := recover(); r != nil {
			//if feed is not closing, re-throw the panic
			if feed.outputClosed != true && feed.closing != true {
				panic(r)
			} else {
				logging.Errorf("Panic is recovered. Since feed is closed, exit gracefully")
			}
		}
	}()
	for {
		select {
		case <-nodeFeed.quit:
			nodeFeed.connected = false
			return
		case event, ok := <-singleFeed.C:
			if !ok {
				// Node feed channel closed; report the error, if any.
				if singleFeed.Error != nil {
					logging.Errorf("go-couchbase: Upr feed from %s failed: %v", host, singleFeed.Error)
				}
				killSwitch <- true
				return
			}
			if feed.outputClosed == true {
				// someone closed the node feed
				logging.Infof("Node need closed, returning from forwardUprEvent")
				return
			}
			feed.output <- event
			if event.Status == gomemcached.NOT_MY_VBUCKET {
				logging.Infof(" Got a not my vbucket error !! ")
				if err := feed.bucket.Refresh(); err != nil {
					logging.Errorf("Unable to refresh bucket %s ", err.Error())
					feed.closeNodeFeeds()
					return
				}
				// this will only connect to nodes that are not connected or changed
				// user will have to reconnect the stream
				if err := feed.connectToNodes(); err != nil {
					logging.Errorf("Unable to connect to nodes %s", err.Error())
					return
				}
			}
		}
	}
}
// closeNodeFeeds asks every per-node forwarding goroutine to quit,
// closes the underlying UPR connections, and drops the node map.
func (feed *UprFeed) closeNodeFeeds() {
	for _, f := range feed.nodeFeeds {
		logging.Infof(" Sending close to forwardUprEvent ")
		close(f.quit)
		f.uprFeed.Close()
	}
	feed.nodeFeeds = nil
}
// Close a Upr feed.
// Safe to call more than once: a second call observes the already-closed
// quit channel and returns immediately.
func (feed *UprFeed) Close() error {
	select {
	case <-feed.quit:
		return nil
	default:
	}
	// Mark closing before tearing down node feeds so forwardUprEvents'
	// recover() treats any send-on-closed-channel panic as benign.
	feed.closing = true
	feed.closeNodeFeeds()
	close(feed.quit)
	// Wait for all forwarding goroutines to exit before closing the
	// aggregate output channel they send on.
	feed.wg.Wait()
	if feed.outputClosed == false {
		feed.outputClosed = true
		close(feed.output)
	}
	return nil
}
@ -0,0 +1,119 @@ | |||
package couchbase | |||
import ( | |||
"bytes" | |||
"fmt" | |||
) | |||
// User describes an RBAC user as returned by /settings/rbac/users.
type User struct {
	Name   string
	Id     string
	Domain string // "local" or "external" -- see PutUserInfo
	Roles  []Role
}
// Role names a role, optionally scoped to a single bucket.
type Role struct {
	Role       string
	BucketName string `json:"bucket_name"` // empty when the role is not bucket-scoped
}
// Sample:
// {"role":"admin","name":"Admin","desc":"Can manage ALL cluster features including security.","ce":true}
// {"role":"query_select","bucket_name":"*","name":"Query Select","desc":"Can execute SELECT statement on bucket to retrieve data"}
//
// RoleDescription describes one role definition as returned by
// /settings/rbac/roles (see GetRolesAll).
type RoleDescription struct {
	Role       string
	Name       string
	Desc       string
	Ce         bool // presumably "available in Community Edition" -- confirm against server docs
	BucketName string `json:"bucket_name"`
}
// Return user-role data, as parsed JSON. | |||
// Sample: | |||
// [{"id":"ivanivanov","name":"Ivan Ivanov","roles":[{"role":"cluster_admin"},{"bucket_name":"default","role":"bucket_admin"}]}, | |||
// {"id":"petrpetrov","name":"Petr Petrov","roles":[{"role":"replication_admin"}]}] | |||
func (c *Client) GetUserRoles() ([]interface{}, error) { | |||
ret := make([]interface{}, 0, 1) | |||
err := c.parseURLResponse("/settings/rbac/users", &ret) | |||
if err != nil { | |||
return nil, err | |||
} | |||
// Get the configured administrator. | |||
// Expected result: {"port":8091,"username":"Administrator"} | |||
adminInfo := make(map[string]interface{}, 2) | |||
err = c.parseURLResponse("/settings/web", &adminInfo) | |||
if err != nil { | |||
return nil, err | |||
} | |||
// Create a special entry for the configured administrator. | |||
adminResult := map[string]interface{}{ | |||
"name": adminInfo["username"], | |||
"id": adminInfo["username"], | |||
"domain": "ns_server", | |||
"roles": []interface{}{ | |||
map[string]interface{}{ | |||
"role": "admin", | |||
}, | |||
}, | |||
} | |||
// Add the configured administrator to the list of results. | |||
ret = append(ret, adminResult) | |||
return ret, nil | |||
} | |||
func (c *Client) GetUserInfoAll() ([]User, error) { | |||
ret := make([]User, 0, 16) | |||
err := c.parseURLResponse("/settings/rbac/users", &ret) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return ret, nil | |||
} | |||
func rolesToParamFormat(roles []Role) string { | |||
var buffer bytes.Buffer | |||
for i, role := range roles { | |||
if i > 0 { | |||
buffer.WriteString(",") | |||
} | |||
buffer.WriteString(role.Role) | |||
if role.BucketName != "" { | |||
buffer.WriteString("[") | |||
buffer.WriteString(role.BucketName) | |||
buffer.WriteString("]") | |||
} | |||
} | |||
return buffer.String() | |||
} | |||
func (c *Client) PutUserInfo(u *User) error { | |||
params := map[string]interface{}{ | |||
"name": u.Name, | |||
"roles": rolesToParamFormat(u.Roles), | |||
} | |||
var target string | |||
switch u.Domain { | |||
case "external": | |||
target = "/settings/rbac/users/" + u.Id | |||
case "local": | |||
target = "/settings/rbac/users/local/" + u.Id | |||
default: | |||
return fmt.Errorf("Unknown user type: %s", u.Domain) | |||
} | |||
var ret string // PUT returns an empty string. We ignore it. | |||
err := c.parsePutURLResponse(target, params, &ret) | |||
return err | |||
} | |||
func (c *Client) GetRolesAll() ([]RoleDescription, error) { | |||
ret := make([]RoleDescription, 0, 32) | |||
err := c.parseURLResponse("/settings/rbac/roles", &ret) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return ret, nil | |||
} |
@ -0,0 +1,49 @@ | |||
package couchbase | |||
import ( | |||
"fmt" | |||
"net/url" | |||
"strings" | |||
) | |||
// CleanupHost returns the hostname with the given suffix removed.
// If h does not end in commonSuffix it is returned unchanged.
func CleanupHost(h, commonSuffix string) string {
	return strings.TrimSuffix(h, commonSuffix)
}
// FindCommonSuffix returns the longest common suffix from the given
// strings. Fewer than two inputs yield "". Note that suffixes are taken
// from input[0][i:] with i > 0, so the whole of input[0] is never
// returned even when all strings are identical (preserved behavior).
func FindCommonSuffix(input []string) string {
	if len(input) < 2 {
		return ""
	}
	rv := ""
	// Try progressively longer suffixes of input[0]. Once a suffix is
	// not shared by all strings, no longer suffix can be either, so we
	// can stop (the original scanned all lengths unnecessarily).
	for i := len(input[0]) - 1; i > 0; i-- {
		suffix := input[0][i:]
		common := true
		for _, s := range input {
			if !strings.HasSuffix(s, suffix) {
				common = false
				break
			}
		}
		if !common {
			break
		}
		rv = suffix
	}
	return rv
}
// ParseURL is a wrapper around url.Parse with some sanity-checking:
// a URL that parses but has no scheme is rejected.
func ParseURL(urlStr string) (*url.URL, error) {
	u, err := url.Parse(urlStr)
	if u != nil && u.Scheme == "" {
		return nil, fmt.Errorf("invalid URL <%s>", urlStr)
	}
	return u, err
}
@ -0,0 +1,77 @@ | |||
package couchbase | |||
// crc32tab is the standard 256-entry CRC-32 lookup table (polynomial
// 0xEDB88320) used by VBHash below.
var crc32tab = []uint32{
	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
	0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
	0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
	0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
	0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
	0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
	0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
	0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
	0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
	0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
	0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
	0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
	0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
	0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
	0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
	0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
	0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
	0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
	0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
	0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
	0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
	0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
	0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
	0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
	0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
	0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
	0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
	0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
	0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
	0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
	0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
	0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
	0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
	0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
	0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
	0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
	0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
	0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
	0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
	0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
	0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
	0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
	0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
	0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
	0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
	0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
	0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
	0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
	0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
	0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
	0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
	0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
	0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
	0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
	0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
	0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
	0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
	0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
	0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
	0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
	0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
	0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
	0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
	0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d}
// VBHash finds the vbucket for the given key.
// It computes a CRC-32 over the raw bytes of key, then folds it down to
// a vbucket index.
func (b *Bucket) VBHash(key string) uint32 {
	crc := uint32(0xffffffff)
	for x := 0; x < len(key); x++ {
		crc = (crc >> 8) ^ crc32tab[(uint64(crc)^uint64(key[x]))&0xff]
	}
	vbm := b.VBServerMap()
	// NOTE(review): the final "& (len-1)" only works as a modulus when
	// len(vbm.VBucketMap) is a power of two -- presumably guaranteed by
	// the server's vbucket configuration; confirm.
	return ((^crc) >> 16) & 0x7fff & (uint32(len(vbm.VBucketMap)) - 1)
}
@ -0,0 +1,231 @@ | |||
package couchbase | |||
import ( | |||
"encoding/json" | |||
"errors" | |||
"fmt" | |||
"io/ioutil" | |||
"math/rand" | |||
"net/http" | |||
"net/url" | |||
"time" | |||
) | |||
// ViewRow represents a single result from a view.
//
// Doc is present only if include_docs was set on the request.
type ViewRow struct {
	ID    string
	Key   interface{}
	Value interface{}
	Doc   *interface{} // nil unless include_docs was requested
}
// A ViewError is a node-specific error indicating a partial failure
// within a view result.
type ViewError struct {
	From   string // node the error came from
	Reason string
}
func (ve ViewError) Error() string { | |||
return "Node: " + ve.From + ", reason: " + ve.Reason | |||
} | |||
// ViewResult holds the entire result set from a view request,
// including the rows and the errors.
type ViewResult struct {
	TotalRows int `json:"total_rows"`
	Rows      []ViewRow
	Errors    []ViewError
}
func (b *Bucket) randomBaseURL() (*url.URL, error) { | |||
nodes := b.HealthyNodes() | |||
if len(nodes) == 0 { | |||
return nil, errors.New("no available couch rest URLs") | |||
} | |||
nodeNo := rand.Intn(len(nodes)) | |||
node := nodes[nodeNo] | |||
b.RLock() | |||
name := b.Name | |||
pool := b.pool | |||
b.RUnlock() | |||
u, err := ParseURL(node.CouchAPIBase) | |||
if err != nil { | |||
return nil, fmt.Errorf("config error: Bucket %q node #%d CouchAPIBase=%q: %v", | |||
name, nodeNo, node.CouchAPIBase, err) | |||
} else if pool != nil { | |||
u.User = pool.client.BaseURL.User | |||
} | |||
return u, err | |||
} | |||
// START_NODE_ID is the sentinel passed as lastNode to randomNextURL to
// request a randomly chosen starting node.
const START_NODE_ID = -1
func (b *Bucket) randomNextURL(lastNode int) (*url.URL, int, error) { | |||
nodes := b.HealthyNodes() | |||
if len(nodes) == 0 { | |||
return nil, -1, errors.New("no available couch rest URLs") | |||
} | |||
var nodeNo int | |||
if lastNode == START_NODE_ID || lastNode >= len(nodes) { | |||
// randomly select a node if the value of lastNode is invalid | |||
nodeNo = rand.Intn(len(nodes)) | |||
} else { | |||
// wrap around the node list | |||
nodeNo = (lastNode + 1) % len(nodes) | |||
} | |||
b.RLock() | |||
name := b.Name | |||
pool := b.pool | |||
b.RUnlock() | |||
node := nodes[nodeNo] | |||
u, err := ParseURL(node.CouchAPIBase) | |||
if err != nil { | |||
return nil, -1, fmt.Errorf("config error: Bucket %q node #%d CouchAPIBase=%q: %v", | |||
name, nodeNo, node.CouchAPIBase, err) | |||
} else if pool != nil { | |||
u.User = pool.client.BaseURL.User | |||
} | |||
return u, nodeNo, err | |||
} | |||
// DocID is the document ID type for the startkey_docid parameter in
// views. Values of this type are placed into the query string verbatim,
// without JSON quoting (see ViewURL).
type DocID string
// qParam formats a string view parameter: document-ID and stale
// parameters pass through verbatim, everything else is wrapped in
// double quotes (no escaping is performed).
func qParam(k, v string) string {
	switch k {
	case "startkey_docid", "endkey_docid", "stale":
		return v
	default:
		return `"` + v + `"`
	}
}
// ViewURL constructs a URL for a view with the given ddoc, view name, | |||
// and parameters. | |||
func (b *Bucket) ViewURL(ddoc, name string, | |||
params map[string]interface{}) (string, error) { | |||
u, err := b.randomBaseURL() | |||
if err != nil { | |||
return "", err | |||
} | |||
values := url.Values{} | |||
for k, v := range params { | |||
switch t := v.(type) { | |||
case DocID: | |||
values[k] = []string{string(t)} | |||
case string: | |||
values[k] = []string{qParam(k, t)} | |||
case int: | |||
values[k] = []string{fmt.Sprintf(`%d`, t)} | |||
case bool: | |||
values[k] = []string{fmt.Sprintf(`%v`, t)} | |||
default: | |||
b, err := json.Marshal(v) | |||
if err != nil { | |||
return "", fmt.Errorf("unsupported value-type %T in Query, "+ | |||
"json encoder said %v", t, err) | |||
} | |||
values[k] = []string{fmt.Sprintf(`%v`, string(b))} | |||
} | |||
} | |||
if ddoc == "" && name == "_all_docs" { | |||
u.Path = fmt.Sprintf("/%s/_all_docs", b.GetName()) | |||
} else { | |||
u.Path = fmt.Sprintf("/%s/_design/%s/_view/%s", b.GetName(), ddoc, name) | |||
} | |||
u.RawQuery = values.Encode() | |||
return u.String(), nil | |||
} | |||
// ViewCallback is called for each view invocation.
// When non-nil, ViewCustom invokes it (via defer) with the design doc,
// view name, the request start time, and the final error (nil on success).
var ViewCallback func(ddoc, name string, start time.Time, err error)
// ViewCustom performs a view request that can map row values to a | |||
// custom type. | |||
// | |||
// See the source to View for an example usage. | |||
func (b *Bucket) ViewCustom(ddoc, name string, params map[string]interface{}, | |||
vres interface{}) (err error) { | |||
if SlowServerCallWarningThreshold > 0 { | |||
defer slowLog(time.Now(), "call to ViewCustom(%q, %q)", ddoc, name) | |||
} | |||
if ViewCallback != nil { | |||
defer func(t time.Time) { ViewCallback(ddoc, name, t, err) }(time.Now()) | |||
} | |||
u, err := b.ViewURL(ddoc, name, params) | |||
if err != nil { | |||
return err | |||
} | |||
req, err := http.NewRequest("GET", u, nil) | |||
if err != nil { | |||
return err | |||
} | |||
ah := b.authHandler(false /* bucket not yet locked */) | |||
maybeAddAuth(req, ah) | |||
res, err := doHTTPRequest(req) | |||
if err != nil { | |||
return fmt.Errorf("error starting view req at %v: %v", u, err) | |||
} | |||
defer res.Body.Close() | |||
if res.StatusCode != 200 { | |||
bod := make([]byte, 512) | |||
l, _ := res.Body.Read(bod) | |||
return fmt.Errorf("error executing view req at %v: %v - %s", | |||
u, res.Status, bod[:l]) | |||
} | |||
body, err := ioutil.ReadAll(res.Body) | |||
if err := json.Unmarshal(body, vres); err != nil { | |||
return nil | |||
} | |||
return nil | |||
} | |||
// View executes a view. | |||
// | |||
// The ddoc parameter is just the bare name of your design doc without | |||
// the "_design/" prefix. | |||
// | |||
// Parameters are string keys with values that correspond to couchbase | |||
// view parameters. Primitive should work fairly naturally (booleans, | |||
// ints, strings, etc...) and other values will attempt to be JSON | |||
// marshaled (useful for array indexing on on view keys, for example). | |||
// | |||
// Example: | |||
// | |||
// res, err := couchbase.View("myddoc", "myview", map[string]interface{}{ | |||
// "group_level": 2, | |||
// "startkey_docid": []interface{}{"thing"}, | |||
// "endkey_docid": []interface{}{"thing", map[string]string{}}, | |||
// "stale": false, | |||
// }) | |||
func (b *Bucket) View(ddoc, name string, params map[string]interface{}) (ViewResult, error) { | |||
vres := ViewResult{} | |||
if err := b.ViewCustom(ddoc, name, params, &vres); err != nil { | |||
//error in accessing views. Retry once after a bucket refresh | |||
b.Refresh() | |||
return vres, b.ViewCustom(ddoc, name, params, &vres) | |||
} else { | |||
return vres, nil | |||
} | |||
} |
@ -0,0 +1,228 @@ | |||
// Copyright 2013 Beego Authors | |||
// Copyright 2014 The Macaron Authors | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"): you may | |||
// not use this file except in compliance with the License. You may obtain | |||
// a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||
// License for the specific language governing permissions and limitations | |||
// under the License. | |||
package session | |||
import ( | |||
"strings" | |||
"sync" | |||
"github.com/couchbaselabs/go-couchbase" | |||
"github.com/go-macaron/session" | |||
) | |||
// CouchbaseSessionStore represents a couchbase session store implementation.
type CouchbaseSessionStore struct {
	b           *couchbase.Bucket // bucket holding the serialized session document
	sid         string            // session ID; also the couchbase document key
	lock        sync.RWMutex      // guards data
	data        map[interface{}]interface{}
	maxlifetime int64 // expiry passed to Bucket.Set on Release (presumably seconds -- confirm)
}
// Set sets value to given key in session. | |||
func (s *CouchbaseSessionStore) Set(key, val interface{}) error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
s.data[key] = val | |||
return nil | |||
} | |||
// Get gets value by given key in session. | |||
func (s *CouchbaseSessionStore) Get(key interface{}) interface{} { | |||
s.lock.RLock() | |||
defer s.lock.RUnlock() | |||
return s.data[key] | |||
} | |||
// Delete delete a key from session. | |||
func (s *CouchbaseSessionStore) Delete(key interface{}) error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
delete(s.data, key) | |||
return nil | |||
} | |||
// ID returns current session ID (also used as the couchbase document key).
func (s *CouchbaseSessionStore) ID() string {
	return s.sid
}
// Release releases resource and save data to provider. | |||
func (s *CouchbaseSessionStore) Release() error { | |||
defer s.b.Close() | |||
// Skip encoding if the data is empty | |||
if len(s.data) == 0 { | |||
return nil | |||
} | |||
data, err := session.EncodeGob(s.data) | |||
if err != nil { | |||
return err | |||
} | |||
return s.b.Set(s.sid, int(s.maxlifetime), data) | |||
} | |||
// Flush deletes all session data. | |||
func (s *CouchbaseSessionStore) Flush() error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
s.data = make(map[interface{}]interface{}) | |||
return nil | |||
} | |||
// CouchbaseProvider represents a couchbase session provider implementation.
type CouchbaseProvider struct {
	maxlifetime int64
	connStr     string // couchbase REST URL, e.g. http://host:port/
	pool        string
	bucket      string
	b           *couchbase.Bucket // most recently opened bucket (see getBucket)
}
func (cp *CouchbaseProvider) getBucket() *couchbase.Bucket { | |||
c, err := couchbase.Connect(cp.connStr) | |||
if err != nil { | |||
return nil | |||
} | |||
pool, err := c.GetPool(cp.pool) | |||
if err != nil { | |||
return nil | |||
} | |||
bucket, err := pool.GetBucket(cp.bucket) | |||
if err != nil { | |||
return nil | |||
} | |||
return bucket | |||
} | |||
// Init initializes memory session provider. | |||
// connStr is couchbase server REST/JSON URL | |||
// e.g. http://host:port/, Pool, Bucket | |||
func (p *CouchbaseProvider) Init(maxlifetime int64, connStr string) error { | |||
p.maxlifetime = maxlifetime | |||
configs := strings.Split(connStr, ",") | |||
if len(configs) > 0 { | |||
p.connStr = configs[0] | |||
} | |||
if len(configs) > 1 { | |||
p.pool = configs[1] | |||
} | |||
if len(configs) > 2 { | |||
p.bucket = configs[2] | |||
} | |||
return nil | |||
} | |||
// Read returns raw session store by session ID.
// A missing document yields a fresh, empty session rather than an error.
func (p *CouchbaseProvider) Read(sid string) (session.RawStore, error) {
	// NOTE(review): getBucket returns nil on connection failure; the
	// p.b.Get below would then panic -- confirm intended behavior.
	p.b = p.getBucket()
	var doc []byte
	// NOTE(review): the Get error is ignored when doc stays nil; a read
	// failure is indistinguishable from a missing session here.
	err := p.b.Get(sid, &doc)
	var kv map[interface{}]interface{}
	if doc == nil {
		kv = make(map[interface{}]interface{})
	} else {
		kv, err = session.DecodeGob(doc)
		if err != nil {
			return nil, err
		}
	}
	cs := &CouchbaseSessionStore{b: p.b, sid: sid, data: kv, maxlifetime: p.maxlifetime}
	return cs, nil
}
// Exist returns true if session with given ID exists. | |||
func (p *CouchbaseProvider) Exist(sid string) bool { | |||
p.b = p.getBucket() | |||
defer p.b.Close() | |||
var doc []byte | |||
if err := p.b.Get(sid, &doc); err != nil || doc == nil { | |||
return false | |||
} else { | |||
return true | |||
} | |||
} | |||
// Destory deletes a session by session ID. | |||
func (p *CouchbaseProvider) Destory(sid string) error { | |||
p.b = p.getBucket() | |||
defer p.b.Close() | |||
p.b.Delete(sid) | |||
return nil | |||
} | |||
// Regenerate regenerates a session store from old session ID to new one.
func (p *CouchbaseProvider) Regenerate(oldsid, sid string) (session.RawStore, error) {
	p.b = p.getBucket()

	var doc []byte
	if err := p.b.Get(oldsid, &doc); err != nil || doc == nil {
		// Old session is missing or unreadable: seed the new ID with an
		// empty document (Set error deliberately ignored).
		p.b.Set(sid, int(p.maxlifetime), "")
	} else {
		// Move the old document to the new ID. Add (not Set) avoids
		// clobbering an existing document under the new ID; its error
		// is deliberately ignored.
		err := p.b.Delete(oldsid)
		if err != nil {
			return nil, err
		}
		_, _ = p.b.Add(sid, int(p.maxlifetime), doc)
	}

	// Re-read under the new ID to build the store's in-memory data.
	err := p.b.Get(sid, &doc)
	if err != nil {
		return nil, err
	}

	var kv map[interface{}]interface{}
	if doc == nil {
		kv = make(map[interface{}]interface{})
	} else {
		kv, err = session.DecodeGob(doc)
		if err != nil {
			return nil, err
		}
	}

	cs := &CouchbaseSessionStore{b: p.b, sid: sid, data: kv, maxlifetime: p.maxlifetime}
	return cs, nil
}
// Count counts and returns number of sessions.
// Not implemented for couchbase; always returns 0.
func (p *CouchbaseProvider) Count() int {
	// FIXME
	return 0
}
// GC calls GC to clean expired sessions.
// No-op: expiry is delegated to couchbase document TTLs set on write.
func (p *CouchbaseProvider) GC() {}
// init registers this provider under the name "couchbase".
func init() {
	session.Register("couchbase", &CouchbaseProvider{})
}
@ -0,0 +1,61 @@ | |||
// Copyright 2018 The Macaron Authors | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"): you may | |||
// not use this file except in compliance with the License. You may obtain | |||
// a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||
// License for the specific language governing permissions and limitations | |||
// under the License. | |||
package session | |||
import ( | |||
"net/url" | |||
"gopkg.in/macaron.v1" | |||
) | |||
// Flash carries one-time messages (error, warning, info, success)
// between requests; values are stored in the embedded url.Values unless
// shown on the current request via the context data.
type Flash struct {
	ctx *macaron.Context
	url.Values
	ErrorMsg, WarningMsg, InfoMsg, SuccessMsg string
}
func (f *Flash) set(name, msg string, current ...bool) { | |||
isShow := false | |||
if (len(current) == 0 && macaron.FlashNow) || | |||
(len(current) > 0 && current[0]) { | |||
isShow = true | |||
} | |||
if isShow { | |||
f.ctx.Data["Flash"] = f | |||
} else { | |||
f.Set(name, msg) | |||
} | |||
} | |||
// Error sets an error flash message; an optional true shows it on the
// current request instead of the next one.
func (f *Flash) Error(msg string, current ...bool) {
	f.ErrorMsg = msg
	f.set("error", msg, current...)
}
// Warning sets a warning flash message; an optional true shows it on
// the current request instead of the next one.
func (f *Flash) Warning(msg string, current ...bool) {
	f.WarningMsg = msg
	f.set("warning", msg, current...)
}
// Info sets an informational flash message; an optional true shows it
// on the current request instead of the next one.
func (f *Flash) Info(msg string, current ...bool) {
	f.InfoMsg = msg
	f.set("info", msg, current...)
}
// Success sets a success flash message; an optional true shows it on
// the current request instead of the next one.
func (f *Flash) Success(msg string, current ...bool) {
	f.SuccessMsg = msg
	f.set("success", msg, current...)
}
@ -0,0 +1,204 @@ | |||
// Copyright 2013 Beego Authors | |||
// Copyright 2014 The Macaron Authors | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"): you may | |||
// not use this file except in compliance with the License. You may obtain | |||
// a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||
// License for the specific language governing permissions and limitations | |||
// under the License. | |||
package session | |||
import ( | |||
"fmt" | |||
"strings" | |||
"sync" | |||
"github.com/bradfitz/gomemcache/memcache" | |||
"github.com/go-macaron/session" | |||
) | |||
// MemcacheStore represents a memcache session store implementation.
type MemcacheStore struct {
	c      *memcache.Client
	sid    string // session ID; also the memcache key
	expire int32  // expiration passed to memcache on Release
	lock   sync.RWMutex // guards data
	data   map[interface{}]interface{}
}
// NewMemcacheStore creates and returns a memcache session store. | |||
func NewMemcacheStore(c *memcache.Client, sid string, expire int32, kv map[interface{}]interface{}) *MemcacheStore { | |||
return &MemcacheStore{ | |||
c: c, | |||
sid: sid, | |||
expire: expire, | |||
data: kv, | |||
} | |||
} | |||
func NewItem(sid string, data []byte, expire int32) *memcache.Item { | |||
return &memcache.Item{ | |||
Key: sid, | |||
Value: data, | |||
Expiration: expire, | |||
} | |||
} | |||
// Set sets value to given key in session. | |||
func (s *MemcacheStore) Set(key, val interface{}) error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
s.data[key] = val | |||
return nil | |||
} | |||
// Get gets value by given key in session. | |||
func (s *MemcacheStore) Get(key interface{}) interface{} { | |||
s.lock.RLock() | |||
defer s.lock.RUnlock() | |||
return s.data[key] | |||
} | |||
// Delete delete a key from session. | |||
func (s *MemcacheStore) Delete(key interface{}) error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
delete(s.data, key) | |||
return nil | |||
} | |||
// ID returns current session ID (also used as the memcache key).
func (s *MemcacheStore) ID() string {
	return s.sid
}
// Release releases resource and save data to provider. | |||
func (s *MemcacheStore) Release() error { | |||
// Skip encoding if the data is empty | |||
if len(s.data) == 0 { | |||
return nil | |||
} | |||
data, err := session.EncodeGob(s.data) | |||
if err != nil { | |||
return err | |||
} | |||
return s.c.Set(NewItem(s.sid, data, s.expire)) | |||
} | |||
// Flush deletes all session data. | |||
func (s *MemcacheStore) Flush() error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
s.data = make(map[interface{}]interface{}) | |||
return nil | |||
} | |||
// MemcacheProvider represents a memcache session provider implementation.
type MemcacheProvider struct {
	c      *memcache.Client
	expire int32 // expiration applied to every session item
}
// Init initializes memcache session provider. | |||
// connStrs: 127.0.0.1:9090;127.0.0.1:9091 | |||
func (p *MemcacheProvider) Init(expire int64, connStrs string) error { | |||
p.expire = int32(expire) | |||
p.c = memcache.New(strings.Split(connStrs, ";")...) | |||
return nil | |||
} | |||
// Read returns raw session store by session ID. | |||
func (p *MemcacheProvider) Read(sid string) (session.RawStore, error) { | |||
if !p.Exist(sid) { | |||
if err := p.c.Set(NewItem(sid, []byte(""), p.expire)); err != nil { | |||
return nil, err | |||
} | |||
} | |||
var kv map[interface{}]interface{} | |||
item, err := p.c.Get(sid) | |||
if err != nil { | |||
return nil, err | |||
} | |||
if len(item.Value) == 0 { | |||
kv = make(map[interface{}]interface{}) | |||
} else { | |||
kv, err = session.DecodeGob(item.Value) | |||
if err != nil { | |||
return nil, err | |||
} | |||
} | |||
return NewMemcacheStore(p.c, sid, p.expire, kv), nil | |||
} | |||
// Exist returns true if session with given ID exists. | |||
func (p *MemcacheProvider) Exist(sid string) bool { | |||
_, err := p.c.Get(sid) | |||
return err == nil | |||
} | |||
// Destory deletes a session by session ID. | |||
func (p *MemcacheProvider) Destory(sid string) error { | |||
return p.c.Delete(sid) | |||
} | |||
// Regenerate regenerates a session store from old session ID to new one.
// It fails if the new ID already exists; when the old session exists its
// item is moved to the new key, otherwise an empty item is created.
func (p *MemcacheProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) {
	if p.Exist(sid) {
		return nil, fmt.Errorf("new sid '%s' already exists", sid)
	}

	item := NewItem(sid, []byte(""), p.expire)
	if p.Exist(oldsid) {
		// Fetch the old item, delete it, and re-key it under sid.
		item, err = p.c.Get(oldsid)
		if err != nil {
			return nil, err
		} else if err = p.c.Delete(oldsid); err != nil {
			return nil, err
		}
		item.Key = sid
	}
	if err = p.c.Set(item); err != nil {
		return nil, err
	}

	var kv map[interface{}]interface{}
	if len(item.Value) == 0 {
		kv = make(map[interface{}]interface{})
	} else {
		kv, err = session.DecodeGob(item.Value)
		if err != nil {
			return nil, err
		}
	}

	return NewMemcacheStore(p.c, sid, p.expire, kv), nil
}
// Count counts and returns number of sessions.
// Not supported by the memcache client; always returns -1.
func (p *MemcacheProvider) Count() int {
	// FIXME: how come this library does not have Stats method?
	return -1
}
// GC calls GC to clean expired sessions.
// No-op: expiry is delegated to memcache item expiration set on write.
func (p *MemcacheProvider) GC() {}
// init registers this provider under the name "memcache".
func init() {
	session.Register("memcache", &MemcacheProvider{})
}
@ -0,0 +1,200 @@ | |||
// Copyright 2013 Beego Authors | |||
// Copyright 2014 The Macaron Authors | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"): you may | |||
// not use this file except in compliance with the License. You may obtain | |||
// a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||
// License for the specific language governing permissions and limitations | |||
// under the License. | |||
package session | |||
import ( | |||
"database/sql" | |||
"fmt" | |||
"log" | |||
"sync" | |||
"time" | |||
_ "github.com/go-sql-driver/mysql" | |||
"github.com/go-macaron/session" | |||
) | |||
// MysqlStore represents a mysql session store implementation.
type MysqlStore struct {
	c    *sql.DB
	sid  string // session ID; also the `key` column in the session table
	lock sync.RWMutex // guards data
	data map[interface{}]interface{}
}
// NewMysqlStore creates and returns a mysql session store. | |||
func NewMysqlStore(c *sql.DB, sid string, kv map[interface{}]interface{}) *MysqlStore { | |||
return &MysqlStore{ | |||
c: c, | |||
sid: sid, | |||
data: kv, | |||
} | |||
} | |||
// Set sets value to given key in session. | |||
func (s *MysqlStore) Set(key, val interface{}) error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
s.data[key] = val | |||
return nil | |||
} | |||
// Get gets value by given key in session. | |||
func (s *MysqlStore) Get(key interface{}) interface{} { | |||
s.lock.RLock() | |||
defer s.lock.RUnlock() | |||
return s.data[key] | |||
} | |||
// Delete delete a key from session. | |||
func (s *MysqlStore) Delete(key interface{}) error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
delete(s.data, key) | |||
return nil | |||
} | |||
// ID returns current session ID (also the row key in the session table).
func (s *MysqlStore) ID() string {
	return s.sid
}
// Release releases resource and save data to provider. | |||
func (s *MysqlStore) Release() error { | |||
// Skip encoding if the data is empty | |||
if len(s.data) == 0 { | |||
return nil | |||
} | |||
data, err := session.EncodeGob(s.data) | |||
if err != nil { | |||
return err | |||
} | |||
_, err = s.c.Exec("UPDATE session SET data=?, expiry=? WHERE `key`=?", | |||
data, time.Now().Unix(), s.sid) | |||
return err | |||
} | |||
// Flush deletes all session data. | |||
func (s *MysqlStore) Flush() error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
s.data = make(map[interface{}]interface{}) | |||
return nil | |||
} | |||
// MysqlProvider represents a mysql session provider implementation.
//
// Sessions are rows in a `session` table with columns `key` (session ID),
// `data` (gob-encoded map) and `expiry` (unix seconds of last save).
type MysqlProvider struct {
	c      *sql.DB
	expire int64 // session lifetime in seconds, set by Init
}

// Init initializes mysql session provider.
// connStr: username:password@protocol(address)/dbname?param=value
func (p *MysqlProvider) Init(expire int64, connStr string) (err error) {
	p.expire = expire
	p.c, err = sql.Open("mysql", connStr)
	if err != nil {
		return err
	}
	// sql.Open only validates its arguments; Ping forces a real connection
	// so configuration errors surface here rather than on first query.
	return p.c.Ping()
}

// Read returns raw session store by session ID.
// On first access it inserts an empty row so a later Release has a row
// to UPDATE.
func (p *MysqlProvider) Read(sid string) (session.RawStore, error) {
	var data []byte
	err := p.c.QueryRow("SELECT data FROM session WHERE `key`=?", sid).Scan(&data)
	if err == sql.ErrNoRows {
		_, err = p.c.Exec("INSERT INTO session(`key`,data,expiry) VALUES(?,?,?)",
			sid, "", time.Now().Unix())
	}
	if err != nil {
		return nil, err
	}
	var kv map[interface{}]interface{}
	if len(data) == 0 {
		kv = make(map[interface{}]interface{})
	} else {
		kv, err = session.DecodeGob(data)
		if err != nil {
			return nil, err
		}
	}
	return NewMysqlStore(p.c, sid, kv), nil
}

// Exist returns true if session with given ID exists.
// NOTE(review): any DB error other than "no rows" panics — presumably
// because the provider interface gives Exist no way to return an error;
// confirm callers can tolerate this.
func (p *MysqlProvider) Exist(sid string) bool {
	var data []byte
	err := p.c.QueryRow("SELECT data FROM session WHERE `key`=?", sid).Scan(&data)
	if err != nil && err != sql.ErrNoRows {
		panic("session/mysql: error checking existence: " + err.Error())
	}
	return err != sql.ErrNoRows
}

// Destory deletes a session by session ID.
// NOTE(review): the misspelling of "Destroy" is kept — presumably it is
// the method name required by the session provider interface; verify
// before renaming.
func (p *MysqlProvider) Destory(sid string) error {
	_, err := p.c.Exec("DELETE FROM session WHERE `key`=?", sid)
	return err
}

// Regenerate regenerates a session store from old session ID to new one.
// NOTE(review): the Exist check and the UPDATE are not atomic, so a
// concurrent insert of sid between them could make the UPDATE violate
// the key's uniqueness — acceptable only if sid collisions are
// practically impossible; confirm.
func (p *MysqlProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) {
	if p.Exist(sid) {
		return nil, fmt.Errorf("new sid '%s' already exists", sid)
	}
	if !p.Exist(oldsid) {
		// Old session vanished (e.g. expired): create an empty row so the
		// rename below has something to move.
		if _, err = p.c.Exec("INSERT INTO session(`key`,data,expiry) VALUES(?,?,?)",
			oldsid, "", time.Now().Unix()); err != nil {
			return nil, err
		}
	}
	if _, err = p.c.Exec("UPDATE session SET `key`=? WHERE `key`=?", sid, oldsid); err != nil {
		return nil, err
	}
	return p.Read(sid)
}

// Count counts and returns number of sessions.
// NOTE(review): panics on query failure, mirroring Exist; confirm this
// is acceptable to callers.
func (p *MysqlProvider) Count() (total int) {
	if err := p.c.QueryRow("SELECT COUNT(*) AS NUM FROM session").Scan(&total); err != nil {
		panic("session/mysql: error counting records: " + err.Error())
	}
	return total
}

// GC calls GC to clean expired sessions.
// Deletes rows whose last save time plus the configured lifetime is in
// the past; errors are logged, not returned.
func (p *MysqlProvider) GC() {
	if _, err := p.c.Exec("DELETE FROM session WHERE expiry + ? <= UNIX_TIMESTAMP(NOW())", p.expire); err != nil {
		log.Printf("session/mysql: error garbage collecting: %v", err)
	}
}
// init registers this provider under the name "mysql" so it can be
// selected via the session middleware configuration.
func init() {
	session.Register("mysql", &MysqlProvider{})
}
@ -0,0 +1,208 @@ | |||
// Copyright 2015 The Macaron Authors | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"): you may | |||
// not use this file except in compliance with the License. You may obtain | |||
// a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||
// License for the specific language governing permissions and limitations | |||
// under the License. | |||
package session | |||
import ( | |||
"fmt" | |||
"sync" | |||
"github.com/lunny/nodb" | |||
"github.com/lunny/nodb/config" | |||
"github.com/go-macaron/session" | |||
) | |||
// NodbStore represents a nodb session store implementation. | |||
type NodbStore struct { | |||
c *nodb.DB | |||
sid string | |||
expire int64 | |||
lock sync.RWMutex | |||
data map[interface{}]interface{} | |||
} | |||
// NewNodbStore creates and returns a ledis session store. | |||
func NewNodbStore(c *nodb.DB, sid string, expire int64, kv map[interface{}]interface{}) *NodbStore { | |||
return &NodbStore{ | |||
c: c, | |||
expire: expire, | |||
sid: sid, | |||
data: kv, | |||
} | |||
} | |||
// Set sets value to given key in session. | |||
func (s *NodbStore) Set(key, val interface{}) error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
s.data[key] = val | |||
return nil | |||
} | |||
// Get gets value by given key in session. | |||
func (s *NodbStore) Get(key interface{}) interface{} { | |||
s.lock.RLock() | |||
defer s.lock.RUnlock() | |||
return s.data[key] | |||
} | |||
// Delete delete a key from session. | |||
func (s *NodbStore) Delete(key interface{}) error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
delete(s.data, key) | |||
return nil | |||
} | |||
// ID returns current session ID. | |||
func (s *NodbStore) ID() string { | |||
return s.sid | |||
} | |||
// Release releases resource and save data to provider. | |||
func (s *NodbStore) Release() error { | |||
// Skip encoding if the data is empty | |||
if len(s.data) == 0 { | |||
return nil | |||
} | |||
data, err := session.EncodeGob(s.data) | |||
if err != nil { | |||
return err | |||
} | |||
if err = s.c.Set([]byte(s.sid), data); err != nil { | |||
return err | |||
} | |||
_, err = s.c.Expire([]byte(s.sid), s.expire) | |||
return err | |||
} | |||
// Flush deletes all session data. | |||
func (s *NodbStore) Flush() error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
s.data = make(map[interface{}]interface{}) | |||
return nil | |||
} | |||
// NodbProvider represents a ledis session provider implementation. | |||
type NodbProvider struct { | |||
c *nodb.DB | |||
expire int64 | |||
} | |||
// Init initializes nodb session provider. | |||
func (p *NodbProvider) Init(expire int64, configs string) error { | |||
p.expire = expire | |||
cfg := new(config.Config) | |||
cfg.DataDir = configs | |||
dbs, err := nodb.Open(cfg) | |||
if err != nil { | |||
return fmt.Errorf("session/nodb: error opening db: %v", err) | |||
} | |||
p.c, err = dbs.Select(0) | |||
return err | |||
} | |||
// Read returns raw session store by session ID. | |||
func (p *NodbProvider) Read(sid string) (session.RawStore, error) { | |||
if !p.Exist(sid) { | |||
if err := p.c.Set([]byte(sid), []byte("")); err != nil { | |||
return nil, err | |||
} | |||
} | |||
var kv map[interface{}]interface{} | |||
kvs, err := p.c.Get([]byte(sid)) | |||
if err != nil { | |||
return nil, err | |||
} | |||
if len(kvs) == 0 { | |||
kv = make(map[interface{}]interface{}) | |||
} else { | |||
kv, err = session.DecodeGob(kvs) | |||
if err != nil { | |||
return nil, err | |||
} | |||
} | |||
return NewNodbStore(p.c, sid, p.expire, kv), nil | |||
} | |||
// Exist returns true if session with given ID exists. | |||
func (p *NodbProvider) Exist(sid string) bool { | |||
count, err := p.c.Exists([]byte(sid)) | |||
return err == nil && count > 0 | |||
} | |||
// Destory deletes a session by session ID. | |||
func (p *NodbProvider) Destory(sid string) error { | |||
_, err := p.c.Del([]byte(sid)) | |||
return err | |||
} | |||
// Regenerate regenerates a session store from old session ID to new one. | |||
func (p *NodbProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) { | |||
if p.Exist(sid) { | |||
return nil, fmt.Errorf("new sid '%s' already exists", sid) | |||
} | |||
kvs := make([]byte, 0) | |||
if p.Exist(oldsid) { | |||
if kvs, err = p.c.Get([]byte(oldsid)); err != nil { | |||
return nil, err | |||
} else if _, err = p.c.Del([]byte(oldsid)); err != nil { | |||
return nil, err | |||
} | |||
} | |||
if err = p.c.Set([]byte(sid), kvs); err != nil { | |||
return nil, err | |||
} else if _, err = p.c.Expire([]byte(sid), p.expire); err != nil { | |||
return nil, err | |||
} | |||
var kv map[interface{}]interface{} | |||
if len(kvs) == 0 { | |||
kv = make(map[interface{}]interface{}) | |||
} else { | |||
kv, err = session.DecodeGob([]byte(kvs)) | |||
if err != nil { | |||
return nil, err | |||
} | |||
} | |||
return NewNodbStore(p.c, sid, p.expire, kv), nil | |||
} | |||
// Count counts and returns number of sessions. | |||
func (p *NodbProvider) Count() int { | |||
// FIXME: how come this library does not have DbSize() method? | |||
return -1 | |||
} | |||
// GC calls GC to clean expired sessions. | |||
func (p *NodbProvider) GC() {} | |||
// init registers this provider under the name "nodb" so it can be
// selected via the session middleware configuration.
func init() {
	session.Register("nodb", &NodbProvider{})
}
@ -0,0 +1,201 @@ | |||
// Copyright 2013 Beego Authors | |||
// Copyright 2014 The Macaron Authors | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"): you may | |||
// not use this file except in compliance with the License. You may obtain | |||
// a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||
// License for the specific language governing permissions and limitations | |||
// under the License. | |||
package session | |||
import ( | |||
"database/sql" | |||
"fmt" | |||
"log" | |||
"sync" | |||
"time" | |||
_ "github.com/lib/pq" | |||
"github.com/go-macaron/session" | |||
) | |||
// PostgresStore represents a postgres session store implementation. | |||
type PostgresStore struct { | |||
c *sql.DB | |||
sid string | |||
lock sync.RWMutex | |||
data map[interface{}]interface{} | |||
} | |||
// NewPostgresStore creates and returns a postgres session store. | |||
func NewPostgresStore(c *sql.DB, sid string, kv map[interface{}]interface{}) *PostgresStore { | |||
return &PostgresStore{ | |||
c: c, | |||
sid: sid, | |||
data: kv, | |||
} | |||
} | |||
// Set sets value to given key in session. | |||
func (s *PostgresStore) Set(key, value interface{}) error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
s.data[key] = value | |||
return nil | |||
} | |||
// Get gets value by given key in session. | |||
func (s *PostgresStore) Get(key interface{}) interface{} { | |||
s.lock.RLock() | |||
defer s.lock.RUnlock() | |||
return s.data[key] | |||
} | |||
// Delete delete a key from session. | |||
func (s *PostgresStore) Delete(key interface{}) error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
delete(s.data, key) | |||
return nil | |||
} | |||
// ID returns current session ID. | |||
func (s *PostgresStore) ID() string { | |||
return s.sid | |||
} | |||
// save postgres session values to database. | |||
// must call this method to save values to database. | |||
func (s *PostgresStore) Release() error { | |||
// Skip encoding if the data is empty | |||
if len(s.data) == 0 { | |||
return nil | |||
} | |||
data, err := session.EncodeGob(s.data) | |||
if err != nil { | |||
return err | |||
} | |||
_, err = s.c.Exec("UPDATE session SET data=$1, expiry=$2 WHERE key=$3", | |||
data, time.Now().Unix(), s.sid) | |||
return err | |||
} | |||
// Flush deletes all session data. | |||
func (s *PostgresStore) Flush() error { | |||
s.lock.Lock() | |||
defer s.lock.Unlock() | |||
s.data = make(map[interface{}]interface{}) | |||
return nil | |||
} | |||
// PostgresProvider represents a postgres session provider implementation.
//
// Sessions are rows in a `session` table with columns key (session ID),
// data (gob-encoded map) and expiry (unix seconds of last save).
type PostgresProvider struct {
	c           *sql.DB
	maxlifetime int64 // session lifetime in seconds, set by Init
}

// Init initializes postgres session provider.
// connStr: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
func (p *PostgresProvider) Init(maxlifetime int64, connStr string) (err error) {
	p.maxlifetime = maxlifetime
	p.c, err = sql.Open("postgres", connStr)
	if err != nil {
		return err
	}
	// sql.Open only validates its arguments; Ping forces a real connection
	// so configuration errors surface here rather than on first query.
	return p.c.Ping()
}

// Read returns raw session store by session ID.
// On first access it inserts an empty row so a later Release has a row
// to UPDATE.
func (p *PostgresProvider) Read(sid string) (session.RawStore, error) {
	var data []byte
	err := p.c.QueryRow("SELECT data FROM session WHERE key=$1", sid).Scan(&data)
	if err == sql.ErrNoRows {
		_, err = p.c.Exec("INSERT INTO session(key,data,expiry) VALUES($1,$2,$3)",
			sid, "", time.Now().Unix())
	}
	if err != nil {
		return nil, err
	}
	var kv map[interface{}]interface{}
	if len(data) == 0 {
		kv = make(map[interface{}]interface{})
	} else {
		kv, err = session.DecodeGob(data)
		if err != nil {
			return nil, err
		}
	}
	return NewPostgresStore(p.c, sid, kv), nil
}

// Exist returns true if session with given ID exists.
// NOTE(review): any DB error other than "no rows" panics — presumably
// because the provider interface gives Exist no way to return an error;
// confirm callers can tolerate this.
func (p *PostgresProvider) Exist(sid string) bool {
	var data []byte
	err := p.c.QueryRow("SELECT data FROM session WHERE key=$1", sid).Scan(&data)
	if err != nil && err != sql.ErrNoRows {
		panic("session/postgres: error checking existence: " + err.Error())
	}
	return err != sql.ErrNoRows
}

// Destory deletes a session by session ID.
// NOTE(review): the misspelling of "Destroy" is kept — presumably it is
// the method name required by the session provider interface; verify
// before renaming.
func (p *PostgresProvider) Destory(sid string) error {
	_, err := p.c.Exec("DELETE FROM session WHERE key=$1", sid)
	return err
}

// Regenerate regenerates a session store from old session ID to new one.
// NOTE(review): the Exist check and the UPDATE are not atomic; a
// concurrent insert of sid between them could break key uniqueness —
// acceptable only if sid collisions are practically impossible; confirm.
func (p *PostgresProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) {
	if p.Exist(sid) {
		return nil, fmt.Errorf("new sid '%s' already exists", sid)
	}
	if !p.Exist(oldsid) {
		// Old session vanished (e.g. expired): create an empty row so the
		// rename below has something to move.
		if _, err = p.c.Exec("INSERT INTO session(key,data,expiry) VALUES($1,$2,$3)",
			oldsid, "", time.Now().Unix()); err != nil {
			return nil, err
		}
	}
	if _, err = p.c.Exec("UPDATE session SET key=$1 WHERE key=$2", sid, oldsid); err != nil {
		return nil, err
	}
	return p.Read(sid)
}

// Count counts and returns number of sessions.
// NOTE(review): panics on query failure, mirroring Exist; confirm this
// is acceptable to callers.
func (p *PostgresProvider) Count() (total int) {
	if err := p.c.QueryRow("SELECT COUNT(*) AS NUM FROM session").Scan(&total); err != nil {
		panic("session/postgres: error counting records: " + err.Error())
	}
	return total
}

// GC calls GC to clean expired sessions.
// Deletes rows whose last save time is more than maxlifetime seconds in
// the past; errors are logged, not returned.
func (p *PostgresProvider) GC() {
	if _, err := p.c.Exec("DELETE FROM session WHERE EXTRACT(EPOCH FROM NOW()) - expiry > $1", p.maxlifetime); err != nil {
		log.Printf("session/postgres: error garbage collecting: %v", err)
	}
}
// init registers this provider under the name "postgres" so it can be
// selected via the session middleware configuration.
func init() {
	session.Register("postgres", &PostgresProvider{})
}
@ -0,0 +1,27 @@ | |||
Copyright (c) 2014 - 2016 lunny | |||
All rights reserved. | |||
Redistribution and use in source and binary forms, with or without | |||
modification, are permitted provided that the following conditions are met: | |||
* Redistributions of source code must retain the above copyright notice, this | |||
list of conditions and the following disclaimer. | |||
* Redistributions in binary form must reproduce the above copyright notice, | |||
this list of conditions and the following disclaimer in the documentation | |||
and/or other materials provided with the distribution. | |||
* Neither the name of the {organization} nor the names of its | |||
contributors may be used to endorse or promote products derived from | |||
this software without specific prior written permission. | |||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE | |||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | |||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | |||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | |||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
@ -0,0 +1,36 @@ | |||
package log | |||
import ( | |||
"database/sql" | |||
"time" | |||
) | |||
// DBWriter is an io.Writer that stores each Write call as a row in a
// `log` database table.
type DBWriter struct {
	db   *sql.DB
	stmt *sql.Stmt // prepared INSERT, created once in NewDBWriter
	// NOTE(review): content is allocated in NewDBWriter but never read or
	// written anywhere in this file — presumably dead code; confirm with
	// other callers before removing.
	content chan []byte
}

// NewDBWriter creates a DBWriter on db, creating the `log` table if it
// does not exist and preparing the insert statement.
// NOTE(review): the DDL/placeholder syntax (`datetime`, `?`) suggests a
// MySQL/SQLite-style driver is expected — confirm; the `id` column is
// never populated by the INSERT.
func NewDBWriter(db *sql.DB) (*DBWriter, error) {
	_, err := db.Exec("CREATE TABLE IF NOT EXISTS log (id int, content text, created datetime)")
	if err != nil {
		return nil, err
	}
	stmt, err := db.Prepare("INSERT INTO log (content, created) values (?, ?)")
	if err != nil {
		return nil, err
	}
	return &DBWriter{db, stmt, make(chan []byte, 1000)}, nil
}

// Write inserts p as one row with the current timestamp. On success it
// reports len(p) written, as io.Writer requires.
func (w *DBWriter) Write(p []byte) (n int, err error) {
	_, err = w.stmt.Exec(string(p), time.Now())
	if err == nil {
		n = len(p)
	}
	return
}

// Close releases the prepared statement. The *sql.DB itself is owned by
// the caller and is not closed here; the stmt.Close error is discarded.
func (w *DBWriter) Close() {
	w.stmt.Close()
}
@ -0,0 +1,112 @@ | |||
package log | |||
import ( | |||
"io" | |||
"os" | |||
"path/filepath" | |||
"sync" | |||
"time" | |||
) | |||
// Compile-time assertion that *Files satisfies io.Writer.
var _ io.Writer = &Files{}

// ByType selects the granularity at which log files are rotated.
type ByType int

const (
	// ByDay rotates to a new file once per day (the zero value).
	ByDay ByType = iota
	// ByHour rotates to a new file once per hour.
	ByHour
	// ByMonth rotates to a new file once per month.
	ByMonth
)

var (
	// formats maps each rotation granularity to the time layout used to
	// build the log file's base name (Go reference time 2006-01-02 15:04:05).
	formats = map[ByType]string{
		ByDay:   "2006-01-02",
		ByHour:  "2006-01-02-15",
		ByMonth: "2006-01",
	}
)

// SetFileFormat overrides the file-name time layout for granularity t.
// The formats map is mutated without locking, so call this before any
// concurrent logging starts.
func SetFileFormat(t ByType, format string) {
	formats[t] = format
}

// Format returns the time layout currently associated with b.
func (b ByType) Format() string {
	return formats[b]
}
// Files is an io.Writer that appends to a timestamp-named .log file,
// transparently opening a new file whenever the formatted timestamp
// changes (see getFile). Writes are serialized by lock.
type Files struct {
	FileOptions
	f          *os.File   // currently open log file, nil until first write
	lastFormat string     // formatted timestamp the current file was opened for
	lock       sync.Mutex // serializes Write and file rotation
}

// FileOptions configures a Files writer.
type FileOptions struct {
	Dir    string         // directory log files are created in; "" means "./"
	ByType ByType         // rotation granularity; zero value is ByDay
	Loc    *time.Location // time zone for file names; nil means time.Local
}
func prepareFileOption(opts []FileOptions) FileOptions { | |||
var opt FileOptions | |||
if len(opts) > 0 { | |||
opt = opts[0] | |||
} | |||
if opt.Dir == "" { | |||
opt.Dir = "./" | |||
} | |||
err := os.MkdirAll(opt.Dir, os.ModePerm) | |||
if err != nil { | |||
panic(err.Error()) | |||
} | |||
if opt.Loc == nil { | |||
opt.Loc = time.Local | |||
} | |||
return opt | |||
} | |||
func NewFileWriter(opts ...FileOptions) *Files { | |||
opt := prepareFileOption(opts) | |||
return &Files{ | |||
FileOptions: opt, | |||
} | |||
} | |||
func (f *Files) getFile() (*os.File, error) { | |||
var err error | |||
t := time.Now().In(f.Loc) | |||
if f.f == nil { | |||
f.lastFormat = t.Format(f.ByType.Format()) | |||
f.f, err = os.OpenFile(filepath.Join(f.Dir, f.lastFormat+".log"), | |||
os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) | |||
return f.f, err | |||
} | |||
if f.lastFormat != t.Format(f.ByType.Format()) { | |||
f.f.Close() | |||
f.lastFormat = t.Format(f.ByType.Format()) | |||
f.f, err = os.OpenFile(filepath.Join(f.Dir, f.lastFormat+".log"), | |||
os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) | |||
return f.f, err | |||
} | |||
return f.f, nil | |||
} | |||
// Write appends bs to the current period's log file, rotating first if
// the period has changed. It implements io.Writer; rotation and the
// write itself happen under the mutex.
func (f *Files) Write(bs []byte) (int, error) {
	f.lock.Lock()
	defer f.lock.Unlock()
	w, err := f.getFile()
	if err != nil {
		return 0, err
	}
	return w.Write(bs)
}

// Close closes the current log file (if any) and resets rotation state
// so a later Write would reopen cleanly.
// NOTE(review): Close does not take f.lock — racy if called while other
// goroutines are still writing; confirm callers stop writers first.
func (f *Files) Close() {
	if f.f != nil {
		f.f.Close()
		f.f = nil
	}
	f.lastFormat = ""
}
@ -0,0 +1,595 @@ | |||
package log | |||
import ( | |||
"bytes" | |||
"fmt" | |||
"io" | |||
"os" | |||
"runtime" | |||
"strings" | |||
"sync" | |||
"time" | |||
) | |||
// These flags define which text to prefix to each log entry generated by the Logger.
const (
	// Bits or'ed together to control what's printed. There is no control over the
	// order they appear (the order listed here) or the format they present (as
	// described in the comments). A colon appears after these items:
	//	2009/0123 01:23:23.123123 /a/b/c/d.go:23: message
	Ldate = 1 << iota // the date: 2009/0123
	Ltime // the time: 01:23:23
	Lmicroseconds // microsecond resolution: 01:23:23.123123. assumes Ltime.
	Llongfile // full file name and line number: /a/b/c/d.go:23
	Lshortfile // final file name element and line number: d.go:23. overrides Llongfile
	Lmodule // module name
	Llevel // level: 0(Debug), 1(Info), 2(Warn), 3(Error), 4(Panic), 5(Fatal)
	Llongcolor // color will start [info] end of line
	Lshortcolor // color only include [info]
	LstdFlags = Ldate | Ltime // initial values for the standard logger
	//Ldefault = Llevel | LstdFlags | Lshortfile | Llongcolor
) // [prefix][time][level][module][shortfile|longfile]

// Ldefault returns the default flag set: level, date, time and short
// file name, plus full-line coloring everywhere except Windows (whose
// classic console does not interpret ANSI escapes).
func Ldefault() int {
	if runtime.GOOS == "windows" {
		return Llevel | LstdFlags | Lshortfile
	}
	return Llevel | LstdFlags | Lshortfile | Llongcolor
}

// Version returns the version string of this log package.
func Version() string {
	return "0.2.0.1121"
}

// Lall is the lowest level filter value: everything is logged.
const (
	Lall = iota
)

// Log levels. Used both as the Logger.Level threshold (Output drops
// anything below it) and as indexes into the levels/colors tables.
const (
	Ldebug = iota
	Linfo
	Lwarn
	Lerror
	Lpanic
	Lfatal
	Lnone // threshold above Lfatal: suppresses all output
)

// ANSI foreground color codes.
const (
	ForeBlack = iota + 30 //30
	ForeRed //31
	ForeGreen //32
	ForeYellow //33
	ForeBlue //34
	ForePurple //35
	ForeCyan //36
	ForeWhite //37
)

// ANSI background color codes.
const (
	BackBlack = iota + 40 //40
	BackRed //41
	BackGreen //42
	BackYellow //43
	BackBlue //44
	BackPurple //45
	BackCyan //46
	BackWhite //47
)

// levels holds the printed tag for each level, indexed Ldebug..Lfatal.
var levels = []string{
	"[Debug]",
	"[Info]",
	"[Warn]",
	"[Error]",
	"[Panic]",
	"[Fatal]",
}

// SetLevels replaces the per-level tags.
// MUST called before all logs (the slice is read without locking).
func SetLevels(lvs []string) {
	levels = lvs
}

// colors holds the ANSI foreground color for each level, indexed
// Ldebug..Lfatal (cyan, green, yellow, red, purple, blue).
var colors = []int{
	ForeCyan,
	ForeGreen,
	ForeYellow,
	ForeRed,
	ForePurple,
	ForeBlue,
}

// SetColors replaces the per-level colors.
// MUST called before all logs (the slice is read without locking).
func SetColors(cls []int) {
	colors = cls
}
// A Logger represents an active logging object that generates lines of
// output to an io.Writer. Each logging operation makes a single call to
// the Writer's Write method. A Logger can be used simultaneously from
// multiple goroutines; it guarantees to serialize access to the Writer.
type Logger struct {
	mu sync.Mutex // ensures atomic writes; protects the following fields
	prefix string // prefix to write at beginning of each line
	flag int // properties
	Level int // minimum level emitted; messages below it are dropped by Output
	out io.Writer // destination for output
	buf bytes.Buffer // for accumulating text to write
	levelStats [6]int64 // count of messages emitted per level (Ldebug..Lfatal)
	loc *time.Location // time zone used for timestamps
}

// New creates a new Logger. The out variable sets the
// destination to which log data will be written.
// The prefix appears at the beginning of each generated log line.
// The flag argument defines the logging properties.
// Level starts at 1 (Linfo), so Debug output is suppressed by default.
// Color flags are stripped unless out is exactly os.Stdout.
func New(out io.Writer, prefix string, flag int) *Logger {
	l := &Logger{out: out, prefix: prefix, Level: 1, flag: flag, loc: time.Local}
	if out != os.Stdout {
		l.flag = RmColorFlags(l.flag)
	}
	return l
}

// Std is the package-level default Logger, writing to os.Stderr.
// NOTE(review): because os.Stderr != os.Stdout, New strips the color
// bits that Ldefault() includes on non-Windows — confirm that colorless
// stderr output is the intended default.
var Std = New(os.Stderr, "", Ldefault())
// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding.
// Knows the buffer has capacity.
//
// With wid > 1 the value is left-padded with zeros to wid digits; with
// wid <= 0 only the minimal digits are written. Negative i is not
// supported (the uint conversion would wrap to a huge value).
func itoa(buf *bytes.Buffer, i int, wid int) {
	var u uint = uint(i)
	if u == 0 && wid <= 1 {
		buf.WriteByte('0')
		return
	}
	// Assemble decimal in reverse order.
	var b [32]byte
	bp := len(b)
	// Loop runs while digits remain OR padding is still owed, emitting
	// '0' once u reaches zero but wid is still positive.
	for ; u > 0 || wid > 0; u /= 10 {
		bp--
		wid--
		b[bp] = byte(u%10) + '0'
	}
	// avoid slicing b to avoid an allocation.
	for bp < len(b) {
		buf.WriteByte(b[bp])
		bp++
	}
}
// moduleOf derives a module name from a source-file path: the segment
// between the last "/src/" ancestor and the file's directory. It returns
// "UNKNOWN" when the path contains no slash or no "/src/" component.
func moduleOf(file string) string {
	dirEnd := strings.LastIndex(file, "/")
	if dirEnd == -1 {
		return "UNKNOWN"
	}
	srcAt := strings.LastIndex(file[:dirEnd], "/src/")
	if srcAt == -1 {
		return "UNKNOWN"
	}
	return file[srcAt+len("/src/") : dirEnd]
}
// formatHeader writes the log-line header into buf. Fields appear in this
// fixed order, each gated by the corresponding flag bit:
// prefix, date, time[.microseconds], [reqId], color-start, [level],
// color-end (short-color mode only), [module], file:line.
// In long-color mode the closing ANSI reset is emitted later by Output,
// so the whole message line is colored.
func (l *Logger) formatHeader(buf *bytes.Buffer, t time.Time,
	file string, line int, lvl int, reqId string) {
	if l.prefix != "" {
		buf.WriteString(l.prefix)
	}
	if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {
		if l.flag&Ldate != 0 {
			year, month, day := t.Date()
			itoa(buf, year, 4)
			buf.WriteByte('/')
			itoa(buf, int(month), 2)
			buf.WriteByte('/')
			itoa(buf, day, 2)
			buf.WriteByte(' ')
		}
		if l.flag&(Ltime|Lmicroseconds) != 0 {
			hour, min, sec := t.Clock()
			itoa(buf, hour, 2)
			buf.WriteByte(':')
			itoa(buf, min, 2)
			buf.WriteByte(':')
			itoa(buf, sec, 2)
			if l.flag&Lmicroseconds != 0 {
				// Nanoseconds truncated to microseconds, zero-padded to 6 digits.
				buf.WriteByte('.')
				itoa(buf, t.Nanosecond()/1e3, 6)
			}
			buf.WriteByte(' ')
		}
	}
	if reqId != "" {
		buf.WriteByte('[')
		buf.WriteString(reqId)
		buf.WriteByte(']')
		buf.WriteByte(' ')
	}
	// Start bold ANSI color for the level tag (and, in long mode, the rest
	// of the line).
	if l.flag&(Lshortcolor|Llongcolor) != 0 {
		buf.WriteString(fmt.Sprintf("\033[1;%dm", colors[lvl]))
	}
	if l.flag&Llevel != 0 {
		buf.WriteString(levels[lvl])
		buf.WriteByte(' ')
	}
	// Short-color mode colors only the level tag, so reset immediately.
	if l.flag&Lshortcolor != 0 {
		buf.WriteString("\033[0m")
	}
	if l.flag&Lmodule != 0 {
		buf.WriteByte('[')
		buf.WriteString(moduleOf(file))
		buf.WriteByte(']')
		buf.WriteByte(' ')
	}
	if l.flag&(Lshortfile|Llongfile) != 0 {
		if l.flag&Lshortfile != 0 {
			// Trim the path down to the final element (Lshortfile overrides
			// Llongfile).
			short := file
			for i := len(file) - 1; i > 0; i-- {
				if file[i] == '/' {
					short = file[i+1:]
					break
				}
			}
			file = short
		}
		buf.WriteString(file)
		buf.WriteByte(':')
		itoa(buf, line, -1)
		buf.WriteByte(' ')
	}
}
// Output writes the output for a logging event. The string s contains
// the text to print after the prefix specified by the flags of the
// Logger. A newline is appended if the last character of s is not
// already a newline. Calldepth is used to recover the PC and is
// provided for generality, although at the moment on all pre-defined
// paths it will be 2.
//
// NOTE(review): the lvl < l.Level check reads Level without the mutex —
// benign for the common set-once-at-startup usage, but technically racy
// if SetOutputLevel is called concurrently; confirm.
func (l *Logger) Output(reqId string, lvl int, calldepth int, s string) error {
	if lvl < l.Level {
		return nil
	}
	now := time.Now().In(l.loc) // get this early.
	var file string
	var line int
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.flag&(Lshortfile|Llongfile|Lmodule) != 0 {
		// release lock while getting caller info - it's expensive.
		l.mu.Unlock()
		var ok bool
		_, file, line, ok = runtime.Caller(calldepth)
		if !ok {
			file = "???"
			line = 0
		}
		// Re-acquire so the deferred Unlock above stays balanced.
		l.mu.Lock()
	}
	l.levelStats[lvl]++
	// buf is reused across calls (protected by mu) to avoid per-line
	// allocations.
	l.buf.Reset()
	l.formatHeader(&l.buf, now, file, line, lvl, reqId)
	l.buf.WriteString(s)
	// Long-color mode left the ANSI color open in formatHeader; close it
	// at end of line.
	if l.flag&Llongcolor != 0 {
		l.buf.WriteString("\033[0m")
	}
	if len(s) > 0 && s[len(s)-1] != '\n' {
		l.buf.WriteByte('\n')
	}
	_, err := l.out.Write(l.buf.Bytes())
	return err
}
// -----------------------------------------

// Printf calls l.Output to print to the logger.
// Arguments are handled in the manner of fmt.Printf.
func (l *Logger) Printf(format string, v ...interface{}) {
	l.Output("", Linfo, 2, fmt.Sprintf(format, v...))
}

// Print calls l.Output to print to the logger.
// Arguments are handled in the manner of fmt.Print.
func (l *Logger) Print(v ...interface{}) {
	l.Output("", Linfo, 2, fmt.Sprint(v...))
}

// Println calls l.Output to print to the logger.
// Arguments are handled in the manner of fmt.Println.
func (l *Logger) Println(v ...interface{}) {
	l.Output("", Linfo, 2, fmt.Sprintln(v...))
}

// -----------------------------------------

// Debugf logs at Ldebug level in the manner of fmt.Sprintf.
func (l *Logger) Debugf(format string, v ...interface{}) {
	l.Output("", Ldebug, 2, fmt.Sprintf(format, v...))
}

// Debug logs at Ldebug level in the manner of fmt.Sprintln.
func (l *Logger) Debug(v ...interface{}) {
	l.Output("", Ldebug, 2, fmt.Sprintln(v...))
}

// -----------------------------------------

// Infof logs at Linfo level in the manner of fmt.Sprintf.
func (l *Logger) Infof(format string, v ...interface{}) {
	l.Output("", Linfo, 2, fmt.Sprintf(format, v...))
}

// Info logs at Linfo level in the manner of fmt.Sprintln.
func (l *Logger) Info(v ...interface{}) {
	l.Output("", Linfo, 2, fmt.Sprintln(v...))
}

// -----------------------------------------

// Warnf logs at Lwarn level in the manner of fmt.Sprintf.
func (l *Logger) Warnf(format string, v ...interface{}) {
	l.Output("", Lwarn, 2, fmt.Sprintf(format, v...))
}

// Warn logs at Lwarn level in the manner of fmt.Sprintln.
func (l *Logger) Warn(v ...interface{}) {
	l.Output("", Lwarn, 2, fmt.Sprintln(v...))
}

// -----------------------------------------

// Errorf logs at Lerror level in the manner of fmt.Sprintf.
func (l *Logger) Errorf(format string, v ...interface{}) {
	l.Output("", Lerror, 2, fmt.Sprintf(format, v...))
}

// Error logs at Lerror level in the manner of fmt.Sprintln.
func (l *Logger) Error(v ...interface{}) {
	l.Output("", Lerror, 2, fmt.Sprintln(v...))
}

// -----------------------------------------

// Fatal logs at Lfatal level and then calls os.Exit(1).
func (l *Logger) Fatal(v ...interface{}) {
	l.Output("", Lfatal, 2, fmt.Sprintln(v...))
	os.Exit(1)
}

// Fatalf is equivalent to l.Printf() followed by a call to os.Exit(1).
func (l *Logger) Fatalf(format string, v ...interface{}) {
	l.Output("", Lfatal, 2, fmt.Sprintf(format, v...))
	os.Exit(1)
}

// -----------------------------------------

// Panic is equivalent to l.Print() followed by a call to panic().
func (l *Logger) Panic(v ...interface{}) {
	s := fmt.Sprintln(v...)
	l.Output("", Lpanic, 2, s)
	panic(s)
}

// Panicf is equivalent to l.Printf() followed by a call to panic().
func (l *Logger) Panicf(format string, v ...interface{}) {
	s := fmt.Sprintf(format, v...)
	l.Output("", Lpanic, 2, s)
	panic(s)
}

// -----------------------------------------

// Stack logs v at Lerror level followed by the stack traces of all
// goroutines. The trace is truncated at 1 MiB.
func (l *Logger) Stack(v ...interface{}) {
	s := fmt.Sprint(v...)
	s += "\n"
	// 1 MiB scratch buffer; runtime.Stack(buf, true) dumps every goroutine.
	buf := make([]byte, 1024*1024)
	n := runtime.Stack(buf, true)
	s += string(buf[:n])
	s += "\n"
	l.Output("", Lerror, 2, s)
}
// -----------------------------------------

// Stat returns a snapshot of the per-level message counters
// (indexed Ldebug..Lfatal). The array is copied under the mutex, so the
// returned slice is safe to read without further locking.
func (l *Logger) Stat() (stats []int64) {
	l.mu.Lock()
	v := l.levelStats // array copy: snapshot taken while locked
	l.mu.Unlock()
	return v[:]
}

// Flags returns the output flags for the logger.
func (l *Logger) Flags() int {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.flag
}
func RmColorFlags(flag int) int { | |||
// for un std out, it should not show color since almost them don't support | |||
if flag&Llongcolor != 0 { | |||
flag = flag ^ Llongcolor | |||
} | |||
if flag&Lshortcolor != 0 { | |||
flag = flag ^ Lshortcolor | |||
} | |||
return flag | |||
} | |||
func (l *Logger) Location() *time.Location { | |||
return l.loc | |||
} | |||
func (l *Logger) SetLocation(loc *time.Location) { | |||
l.loc = loc | |||
} | |||
// SetFlags sets the output flags for the logger. | |||
func (l *Logger) SetFlags(flag int) { | |||
l.mu.Lock() | |||
defer l.mu.Unlock() | |||
if l.out != os.Stdout { | |||
flag = RmColorFlags(flag) | |||
} | |||
l.flag = flag | |||
} | |||
// Prefix returns the output prefix for the logger.
func (l *Logger) Prefix() string {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.prefix
}
// SetPrefix sets the output prefix for the logger.
func (l *Logger) SetPrefix(prefix string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.prefix = prefix
}
// SetOutputLevel sets the output level for the logger.
func (l *Logger) SetOutputLevel(lvl int) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.Level = lvl
}
// OutputLevel returns the output level of the logger.
// NOTE(review): reads l.Level without the mutex, unlike SetOutputLevel.
func (l *Logger) OutputLevel() int {
	return l.Level
}
// SetOutput sets the output destination for the logger.
// Color flags are stripped for any writer other than os.Stdout.
func (l *Logger) SetOutput(w io.Writer) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.out = w
	if w != os.Stdout {
		l.flag = RmColorFlags(l.flag)
	}
}
// SetOutput sets the output destination for the standard logger.
func SetOutput(w io.Writer) {
	Std.SetOutput(w)
}
// SetLocation sets the timestamp location for the standard logger.
func SetLocation(loc *time.Location) {
	Std.SetLocation(loc)
}
// Location returns the timestamp location of the standard logger.
func Location() *time.Location {
	return Std.Location()
}
// Flags returns the output flags for the standard logger.
func Flags() int {
	return Std.Flags()
}
// SetFlags sets the output flags for the standard logger.
func SetFlags(flag int) {
	Std.SetFlags(flag)
}
// Prefix returns the output prefix for the standard logger.
func Prefix() string {
	return Std.Prefix()
}
// SetPrefix sets the output prefix for the standard logger.
func SetPrefix(prefix string) {
	Std.SetPrefix(prefix)
}
// SetOutputLevel sets the output level for the standard logger.
func SetOutputLevel(lvl int) {
	Std.SetOutputLevel(lvl)
}
// OutputLevel returns the output level of the standard logger.
func OutputLevel() int {
	return Std.OutputLevel()
}
// -----------------------------------------
// Print calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Print.
// NOTE(review): uses Sprintln, so Print is effectively Println here.
func Print(v ...interface{}) {
	Std.Output("", Linfo, 2, fmt.Sprintln(v...))
}
// Printf calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Printf.
func Printf(format string, v ...interface{}) {
	Std.Output("", Linfo, 2, fmt.Sprintf(format, v...))
}
// Println calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func Println(v ...interface{}) {
	Std.Output("", Linfo, 2, fmt.Sprintln(v...))
}
// -----------------------------------------
// Debugf logs at debug level in the manner of fmt.Printf.
func Debugf(format string, v ...interface{}) {
	Std.Output("", Ldebug, 2, fmt.Sprintf(format, v...))
}
// Debug logs at debug level in the manner of fmt.Println.
func Debug(v ...interface{}) {
	Std.Output("", Ldebug, 2, fmt.Sprintln(v...))
}
// -----------------------------------------
// Infof logs at info level in the manner of fmt.Printf.
func Infof(format string, v ...interface{}) {
	Std.Output("", Linfo, 2, fmt.Sprintf(format, v...))
}
// Info logs at info level in the manner of fmt.Println.
func Info(v ...interface{}) {
	Std.Output("", Linfo, 2, fmt.Sprintln(v...))
}
// -----------------------------------------
// Warnf logs at warn level in the manner of fmt.Printf.
func Warnf(format string, v ...interface{}) {
	Std.Output("", Lwarn, 2, fmt.Sprintf(format, v...))
}
// Warn logs at warn level in the manner of fmt.Println.
func Warn(v ...interface{}) {
	Std.Output("", Lwarn, 2, fmt.Sprintln(v...))
}
// -----------------------------------------
// Errorf logs at error level in the manner of fmt.Printf.
func Errorf(format string, v ...interface{}) {
	Std.Output("", Lerror, 2, fmt.Sprintf(format, v...))
}
// Error logs at error level in the manner of fmt.Println.
func Error(v ...interface{}) {
	Std.Output("", Lerror, 2, fmt.Sprintln(v...))
}
// ----------------------------------------- | |||
// Fatal is equivalent to Print() followed by a call to os.Exit(1). | |||
func Fatal(v ...interface{}) { | |||
Std.Output("", Lfatal, 2, fmt.Sprintln(v...)) | |||
} | |||
// Fatalf is equivalent to Printf() followed by a call to os.Exit(1). | |||
func Fatalf(format string, v ...interface{}) { | |||
Std.Output("", Lfatal, 2, fmt.Sprintf(format, v...)) | |||
} | |||
// ----------------------------------------- | |||
// Panic is equivalent to Print() followed by a call to panic(). | |||
func Panic(v ...interface{}) { | |||
Std.Output("", Lpanic, 2, fmt.Sprintln(v...)) | |||
} | |||
// Panicf is equivalent to Printf() followed by a call to panic(). | |||
func Panicf(format string, v ...interface{}) { | |||
Std.Output("", Lpanic, 2, fmt.Sprintf(format, v...)) | |||
} | |||
// ----------------------------------------- | |||
// Stack logs v at error level via the standard logger, followed by the
// stack traces of all goroutines, truncated to 1MB.
func Stack(v ...interface{}) {
	s := fmt.Sprint(v...)
	s += "\n"
	buf := make([]byte, 1024*1024)
	n := runtime.Stack(buf, true)
	s += string(buf[:n])
	s += "\n"
	Std.Output("", Lerror, 2, s)
}
// ----------------------------------------- |
@ -0,0 +1,21 @@ | |||
The MIT License (MIT) | |||
Copyright (c) 2014 siddontang | |||
Permission is hereby granted, free of charge, to any person obtaining a copy | |||
of this software and associated documentation files (the "Software"), to deal | |||
in the Software without restriction, including without limitation the rights | |||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||
copies of the Software, and to permit persons to whom the Software is | |||
furnished to do so, subject to the following conditions: | |||
The above copyright notice and this permission notice shall be included in all | |||
copies or substantial portions of the Software. | |||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |||
SOFTWARE. |
@ -0,0 +1,106 @@ | |||
package nodb | |||
import ( | |||
"sync" | |||
"github.com/lunny/nodb/store" | |||
) | |||
// batch wraps a store.WriteBatch, recording binlog events for each
// mutation so they can be published on Commit. The embedded Locker
// selects the locking strategy (db-wide, tx, or multi).
type batch struct {
	l *Nodb

	store.WriteBatch

	sync.Locker

	logs [][]byte // pending binlog entries for this batch

	tx *Tx // non-nil when the batch runs inside a transaction
}
// Commit writes the batch to the store under the global commit lock.
// On success, pending binlog entries are either written to the binlog
// (auto-commit) or handed to the enclosing transaction.
func (b *batch) Commit() error {
	b.l.commitLock.Lock()
	defer b.l.commitLock.Unlock()

	err := b.WriteBatch.Commit()

	if b.l.binlog != nil {
		if err == nil {
			if b.tx == nil {
				b.l.binlog.Log(b.logs...)
			} else {
				b.tx.logs = append(b.tx.logs, b.logs...)
			}
		}
		// reset pending logs whether or not the commit succeeded
		b.logs = [][]byte{}
	}

	return err
}
// Lock acquires the batch's locking strategy.
func (b *batch) Lock() {
	b.Locker.Lock()
}

// Unlock discards any uncommitted state: pending binlog entries are
// dropped and the write batch is rolled back before releasing the lock.
func (b *batch) Unlock() {
	if b.l.binlog != nil {
		b.logs = [][]byte{}
	}
	b.WriteBatch.Rollback()
	b.Locker.Unlock()
}
// Put queues a key/value write, also recording a binlog PUT event when
// the binlog is enabled.
func (b *batch) Put(key []byte, value []byte) {
	if b.l.binlog != nil {
		buf := encodeBinLogPut(key, value)
		b.logs = append(b.logs, buf)
	}
	b.WriteBatch.Put(key, value)
}

// Delete queues a key deletion, also recording a binlog DELETE event
// when the binlog is enabled.
func (b *batch) Delete(key []byte) {
	if b.l.binlog != nil {
		buf := encodeBinLogDelete(key)
		b.logs = append(b.logs, buf)
	}
	b.WriteBatch.Delete(key)
}
// dbBatchLocker is the locking strategy for ordinary (auto-commit)
// batches: a read-lock on the shared RWMutex plus a per-batch mutex.
type dbBatchLocker struct {
	l      *sync.Mutex
	wrLock *sync.RWMutex
}

// Lock takes the shared RLock first, then the batch mutex;
// Unlock releases them in reverse order.
func (l *dbBatchLocker) Lock() {
	l.wrLock.RLock()
	l.l.Lock()
}

func (l *dbBatchLocker) Unlock() {
	l.l.Unlock()
	l.wrLock.RUnlock()
}
// txBatchLocker is a no-op locker: a transaction already holds the
// write lock for its whole lifetime, so batches inside it skip locking.
type txBatchLocker struct {
}

func (l *txBatchLocker) Lock()   {}
func (l *txBatchLocker) Unlock() {}

// multiBatchLocker is a no-op locker used inside MULTI blocks, which
// hold their own outer lock.
type multiBatchLocker struct {
}

func (l *multiBatchLocker) Lock()   {}
func (l *multiBatchLocker) Unlock() {}
func (l *Nodb) newBatch(wb store.WriteBatch, locker sync.Locker, tx *Tx) *batch { | |||
b := new(batch) | |||
b.l = l | |||
b.WriteBatch = wb | |||
b.tx = tx | |||
b.Locker = locker | |||
b.logs = [][]byte{} | |||
return b | |||
} |
@ -0,0 +1,391 @@ | |||
package nodb | |||
import ( | |||
"bufio" | |||
"encoding/binary" | |||
"fmt" | |||
"io" | |||
"io/ioutil" | |||
"os" | |||
"path" | |||
"strconv" | |||
"strings" | |||
"sync" | |||
"time" | |||
"github.com/lunny/log" | |||
"github.com/lunny/nodb/config" | |||
) | |||
// BinLogHead is the fixed 12-byte header written before every binlog
// payload.
type BinLogHead struct {
	CreateTime uint32 // unix time (seconds) the batch was logged
	BatchId    uint32 // shared by all events belonging to one batch
	PayloadLen uint32 // byte length of the payload following the header
}

// Len returns the encoded header size: three big-endian uint32 fields.
func (h *BinLogHead) Len() int {
	return 12
}

// Write serializes the header to w as three big-endian uint32 values.
func (h *BinLogHead) Write(w io.Writer) error {
	if err := binary.Write(w, binary.BigEndian, h.CreateTime); err != nil {
		return err
	}

	if err := binary.Write(w, binary.BigEndian, h.BatchId); err != nil {
		return err
	}

	if err := binary.Write(w, binary.BigEndian, h.PayloadLen); err != nil {
		return err
	}

	return nil
}

// handleReadError promotes io.EOF to io.ErrUnexpectedEOF: hitting EOF
// in the middle of a header means the log file is truncated.
func (h *BinLogHead) handleReadError(err error) error {
	if err == io.EOF {
		return io.ErrUnexpectedEOF
	}
	return err
}

// Read deserializes a header from r. io.EOF before the first field is
// returned unchanged (clean end of log); EOF after that is reported as
// io.ErrUnexpectedEOF.
func (h *BinLogHead) Read(r io.Reader) error {
	var err error
	if err = binary.Read(r, binary.BigEndian, &h.CreateTime); err != nil {
		return err
	}

	if err = binary.Read(r, binary.BigEndian, &h.BatchId); err != nil {
		return h.handleReadError(err)
	}

	if err = binary.Read(r, binary.BigEndian, &h.PayloadLen); err != nil {
		return h.handleReadError(err)
	}

	return nil
}

// InSameBatch reports whether both headers belong to the same batch.
func (h *BinLogHead) InSameBatch(ho *BinLogHead) bool {
	return h.CreateTime == ho.CreateTime && h.BatchId == ho.BatchId
}
/* | |||
index file format: | |||
ledis-bin.00001 | |||
ledis-bin.00002 | |||
ledis-bin.00003 | |||
log file format | |||
Log: Head|PayloadData | |||
Head: createTime|batchId|payloadData | |||
*/ | |||
// BinLog manages a directory of rotated binlog files plus an index file
// listing them. All exported methods are serialized by the embedded
// mutex.
type BinLog struct {
	sync.Mutex

	path string // directory holding log and index files

	cfg *config.BinLogConfig

	logFile *os.File // currently open log file (nil until first write)

	logWb *bufio.Writer // buffered writer over logFile

	indexName string   // path of the index file
	logNames  []string // log file names, oldest first

	lastLogIndex int64 // numeric suffix of the current log file

	batchId uint32 // monotonically increasing per-batch id

	ch chan struct{} // closed after each Log() to wake Wait() callers
}
// NewBinLog creates the binlog directory under cfg.DataDir, loads the
// existing index (if any) and returns a ready-to-use BinLog.
func NewBinLog(cfg *config.Config) (*BinLog, error) {
	l := new(BinLog)

	l.cfg = &cfg.BinLog
	l.cfg.Adjust()

	l.path = path.Join(cfg.DataDir, "binlog")

	if err := os.MkdirAll(l.path, os.ModePerm); err != nil {
		return nil, err
	}
	l.logNames = make([]string, 0, 16)

	l.ch = make(chan struct{})

	if err := l.loadIndex(); err != nil {
		return nil, err
	}

	return l, nil
}
func (l *BinLog) flushIndex() error { | |||
data := strings.Join(l.logNames, "\n") | |||
bakName := fmt.Sprintf("%s.bak", l.indexName) | |||
f, err := os.OpenFile(bakName, os.O_WRONLY|os.O_CREATE, 0666) | |||
if err != nil { | |||
log.Error("create binlog bak index error %s", err.Error()) | |||
return err | |||
} | |||
if _, err := f.WriteString(data); err != nil { | |||
log.Error("write binlog index error %s", err.Error()) | |||
f.Close() | |||
return err | |||
} | |||
f.Close() | |||
if err := os.Rename(bakName, l.indexName); err != nil { | |||
log.Error("rename binlog bak index error %s", err.Error()) | |||
return err | |||
} | |||
return nil | |||
} | |||
func (l *BinLog) loadIndex() error { | |||
l.indexName = path.Join(l.path, fmt.Sprintf("ledis-bin.index")) | |||
if _, err := os.Stat(l.indexName); os.IsNotExist(err) { | |||
//no index file, nothing to do | |||
} else { | |||
indexData, err := ioutil.ReadFile(l.indexName) | |||
if err != nil { | |||
return err | |||
} | |||
lines := strings.Split(string(indexData), "\n") | |||
for _, line := range lines { | |||
line = strings.Trim(line, "\r\n ") | |||
if len(line) == 0 { | |||
continue | |||
} | |||
if _, err := os.Stat(path.Join(l.path, line)); err != nil { | |||
log.Error("load index line %s error %s", line, err.Error()) | |||
return err | |||
} else { | |||
l.logNames = append(l.logNames, line) | |||
} | |||
} | |||
} | |||
if l.cfg.MaxFileNum > 0 && len(l.logNames) > l.cfg.MaxFileNum { | |||
//remove oldest logfile | |||
if err := l.Purge(len(l.logNames) - l.cfg.MaxFileNum); err != nil { | |||
return err | |||
} | |||
} | |||
var err error | |||
if len(l.logNames) == 0 { | |||
l.lastLogIndex = 1 | |||
} else { | |||
lastName := l.logNames[len(l.logNames)-1] | |||
if l.lastLogIndex, err = strconv.ParseInt(path.Ext(lastName)[1:], 10, 64); err != nil { | |||
log.Error("invalid logfile name %s", err.Error()) | |||
return err | |||
} | |||
//like mysql, if server restart, a new binlog will create | |||
l.lastLogIndex++ | |||
} | |||
return nil | |||
} | |||
// getLogFile returns the file name for the current lastLogIndex.
func (l *BinLog) getLogFile() string {
	return l.FormatLogFileName(l.lastLogIndex)
}
func (l *BinLog) openNewLogFile() error { | |||
var err error | |||
lastName := l.getLogFile() | |||
logPath := path.Join(l.path, lastName) | |||
if l.logFile, err = os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0666); err != nil { | |||
log.Error("open new logfile error %s", err.Error()) | |||
return err | |||
} | |||
if l.cfg.MaxFileNum > 0 && len(l.logNames) == l.cfg.MaxFileNum { | |||
l.purge(1) | |||
} | |||
l.logNames = append(l.logNames, lastName) | |||
if l.logWb == nil { | |||
l.logWb = bufio.NewWriterSize(l.logFile, 1024) | |||
} else { | |||
l.logWb.Reset(l.logFile) | |||
} | |||
if err = l.flushIndex(); err != nil { | |||
return err | |||
} | |||
return nil | |||
} | |||
func (l *BinLog) checkLogFileSize() bool { | |||
if l.logFile == nil { | |||
return false | |||
} | |||
st, _ := l.logFile.Stat() | |||
if st.Size() >= int64(l.cfg.MaxFileSize) { | |||
l.closeLog() | |||
return true | |||
} | |||
return false | |||
} | |||
// closeLog closes the current log file and advances lastLogIndex so the
// next write opens a fresh file. Callers must ensure logFile is non-nil.
func (l *BinLog) closeLog() {
	l.lastLogIndex++

	l.logFile.Close()
	l.logFile = nil
}
// purge removes the n oldest log files from disk and from logNames.
// The caller must hold the lock and ensure n <= len(l.logNames).
func (l *BinLog) purge(n int) {
	for i := 0; i < n; i++ {
		logPath := path.Join(l.path, l.logNames[i])
		os.Remove(logPath)
	}

	copy(l.logNames[0:], l.logNames[n:])
	l.logNames = l.logNames[0 : len(l.logNames)-n]
}
// Close closes the current log file, if any. Safe to call repeatedly.
func (l *BinLog) Close() {
	if l.logFile != nil {
		l.logFile.Close()
		l.logFile = nil
	}
}
// LogNames returns the known log file names, oldest first.
func (l *BinLog) LogNames() []string {
	return l.logNames
}

// LogFileName returns the name of the current log file.
func (l *BinLog) LogFileName() string {
	return l.getLogFile()
}

// LogFilePos returns the current write offset (file size) of the open
// log file, or 0 when no file is open.
// NOTE(review): the Stat error is discarded; a failed Stat would panic
// on the nil FileInfo.
func (l *BinLog) LogFilePos() int64 {
	if l.logFile == nil {
		return 0
	} else {
		st, _ := l.logFile.Stat()
		return st.Size()
	}
}

// LogFileIndex returns the numeric index of the current log file.
func (l *BinLog) LogFileIndex() int64 {
	return l.lastLogIndex
}

// FormatLogFileName renders the canonical log file name for an index,
// e.g. "ledis-bin.0000001".
func (l *BinLog) FormatLogFileName(index int64) string {
	return fmt.Sprintf("ledis-bin.%07d", index)
}

// FormatLogFilePath renders the absolute path for a log file index.
func (l *BinLog) FormatLogFilePath(index int64) string {
	return path.Join(l.path, l.FormatLogFileName(index))
}

// LogPath returns the binlog directory.
func (l *BinLog) LogPath() string {
	return l.path
}
// Purge deletes the n oldest log files and rewrites the index.
// The currently active log file is never purged.
func (l *BinLog) Purge(n int) error {
	l.Lock()
	defer l.Unlock()

	if len(l.logNames) == 0 {
		return nil
	}

	if n >= len(l.logNames) {
		n = len(l.logNames)
		//can not purge current log file
		if l.logNames[n-1] == l.getLogFile() {
			n = n - 1
		}
	}

	l.purge(n)

	return l.flushIndex()
}
func (l *BinLog) PurgeAll() error { | |||
l.Lock() | |||
defer l.Unlock() | |||
l.closeLog() | |||
return l.openNewLogFile() | |||
} | |||
func (l *BinLog) Log(args ...[]byte) error { | |||
l.Lock() | |||
defer l.Unlock() | |||
var err error | |||
if l.logFile == nil { | |||
if err = l.openNewLogFile(); err != nil { | |||
return err | |||
} | |||
} | |||
head := &BinLogHead{} | |||
head.CreateTime = uint32(time.Now().Unix()) | |||
head.BatchId = l.batchId | |||
l.batchId++ | |||
for _, data := range args { | |||
head.PayloadLen = uint32(len(data)) | |||
if err := head.Write(l.logWb); err != nil { | |||
return err | |||
} | |||
if _, err := l.logWb.Write(data); err != nil { | |||
return err | |||
} | |||
} | |||
if err = l.logWb.Flush(); err != nil { | |||
log.Error("write log error %s", err.Error()) | |||
return err | |||
} | |||
l.checkLogFileSize() | |||
close(l.ch) | |||
l.ch = make(chan struct{}) | |||
return nil | |||
} | |||
// Wait returns a channel that is closed after the next successful Log.
// NOTE(review): l.ch is read without holding the mutex while Log
// replaces it under the mutex — confirm callers tolerate this race.
func (l *BinLog) Wait() <-chan struct{} {
	return l.ch
}
@ -0,0 +1,215 @@ | |||
package nodb | |||
import ( | |||
"encoding/binary" | |||
"errors" | |||
"fmt" | |||
"strconv" | |||
) | |||
// Errors returned when a binlog event's type byte or framing does not
// match the expected encoding.
var (
	errBinLogDeleteType  = errors.New("invalid bin log delete type")
	errBinLogPutType     = errors.New("invalid bin log put type")
	errBinLogCommandType = errors.New("invalid bin log command type")
)
func encodeBinLogDelete(key []byte) []byte { | |||
buf := make([]byte, 1+len(key)) | |||
buf[0] = BinLogTypeDeletion | |||
copy(buf[1:], key) | |||
return buf | |||
} | |||
func decodeBinLogDelete(sz []byte) ([]byte, error) { | |||
if len(sz) < 1 || sz[0] != BinLogTypeDeletion { | |||
return nil, errBinLogDeleteType | |||
} | |||
return sz[1:], nil | |||
} | |||
func encodeBinLogPut(key []byte, value []byte) []byte { | |||
buf := make([]byte, 3+len(key)+len(value)) | |||
buf[0] = BinLogTypePut | |||
pos := 1 | |||
binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) | |||
pos += 2 | |||
copy(buf[pos:], key) | |||
pos += len(key) | |||
copy(buf[pos:], value) | |||
return buf | |||
} | |||
func decodeBinLogPut(sz []byte) ([]byte, []byte, error) { | |||
if len(sz) < 3 || sz[0] != BinLogTypePut { | |||
return nil, nil, errBinLogPutType | |||
} | |||
keyLen := int(binary.BigEndian.Uint16(sz[1:])) | |||
if 3+keyLen > len(sz) { | |||
return nil, nil, errBinLogPutType | |||
} | |||
return sz[3 : 3+keyLen], sz[3+keyLen:], nil | |||
} | |||
func FormatBinLogEvent(event []byte) (string, error) { | |||
logType := uint8(event[0]) | |||
var err error | |||
var k []byte | |||
var v []byte | |||
var buf []byte = make([]byte, 0, 1024) | |||
switch logType { | |||
case BinLogTypePut: | |||
k, v, err = decodeBinLogPut(event) | |||
buf = append(buf, "PUT "...) | |||
case BinLogTypeDeletion: | |||
k, err = decodeBinLogDelete(event) | |||
buf = append(buf, "DELETE "...) | |||
default: | |||
err = errInvalidBinLogEvent | |||
} | |||
if err != nil { | |||
return "", err | |||
} | |||
if buf, err = formatDataKey(buf, k); err != nil { | |||
return "", err | |||
} | |||
if v != nil && len(v) != 0 { | |||
buf = append(buf, fmt.Sprintf(" %q", v)...) | |||
} | |||
return String(buf), nil | |||
} | |||
// formatDataKey appends a readable rendering of the encoded data key k
// to buf as "DB:<index> <type-name> <decoded fields...>", dispatching on
// the type byte k[1] to the matching per-type decoder.
func formatDataKey(buf []byte, k []byte) ([]byte, error) {
	if len(k) < 2 {
		return nil, errInvalidBinLogEvent
	}

	buf = append(buf, fmt.Sprintf("DB:%2d ", k[0])...)
	buf = append(buf, fmt.Sprintf("%s ", TypeName[k[1]])...)

	// throwaway DB whose index the decoders validate against k[0]
	db := new(DB)
	db.index = k[0]

	//to do format at respective place

	switch k[1] {
	case KVType:
		if key, err := db.decodeKVKey(k); err != nil {
			return nil, err
		} else {
			buf = strconv.AppendQuote(buf, String(key))
		}
	case HashType:
		if key, field, err := db.hDecodeHashKey(k); err != nil {
			return nil, err
		} else {
			buf = strconv.AppendQuote(buf, String(key))
			buf = append(buf, ' ')
			buf = strconv.AppendQuote(buf, String(field))
		}
	case HSizeType:
		if key, err := db.hDecodeSizeKey(k); err != nil {
			return nil, err
		} else {
			buf = strconv.AppendQuote(buf, String(key))
		}
	case ListType:
		if key, seq, err := db.lDecodeListKey(k); err != nil {
			return nil, err
		} else {
			buf = strconv.AppendQuote(buf, String(key))
			buf = append(buf, ' ')
			buf = strconv.AppendInt(buf, int64(seq), 10)
		}
	case LMetaType:
		if key, err := db.lDecodeMetaKey(k); err != nil {
			return nil, err
		} else {
			buf = strconv.AppendQuote(buf, String(key))
		}
	case ZSetType:
		if key, m, err := db.zDecodeSetKey(k); err != nil {
			return nil, err
		} else {
			buf = strconv.AppendQuote(buf, String(key))
			buf = append(buf, ' ')
			buf = strconv.AppendQuote(buf, String(m))
		}
	case ZSizeType:
		if key, err := db.zDecodeSizeKey(k); err != nil {
			return nil, err
		} else {
			buf = strconv.AppendQuote(buf, String(key))
		}
	case ZScoreType:
		if key, m, score, err := db.zDecodeScoreKey(k); err != nil {
			return nil, err
		} else {
			buf = strconv.AppendQuote(buf, String(key))
			buf = append(buf, ' ')
			buf = strconv.AppendQuote(buf, String(m))
			buf = append(buf, ' ')
			buf = strconv.AppendInt(buf, score, 10)
		}
	case BitType:
		if key, seq, err := db.bDecodeBinKey(k); err != nil {
			return nil, err
		} else {
			buf = strconv.AppendQuote(buf, String(key))
			buf = append(buf, ' ')
			buf = strconv.AppendUint(buf, uint64(seq), 10)
		}
	case BitMetaType:
		if key, err := db.bDecodeMetaKey(k); err != nil {
			return nil, err
		} else {
			buf = strconv.AppendQuote(buf, String(key))
		}
	case SetType:
		if key, member, err := db.sDecodeSetKey(k); err != nil {
			return nil, err
		} else {
			buf = strconv.AppendQuote(buf, String(key))
			buf = append(buf, ' ')
			buf = strconv.AppendQuote(buf, String(member))
		}
	case SSizeType:
		if key, err := db.sDecodeSizeKey(k); err != nil {
			return nil, err
		} else {
			buf = strconv.AppendQuote(buf, String(key))
		}
	case ExpTimeType:
		if tp, key, t, err := db.expDecodeTimeKey(k); err != nil {
			return nil, err
		} else {
			buf = append(buf, TypeName[tp]...)
			buf = append(buf, ' ')
			buf = strconv.AppendQuote(buf, String(key))
			buf = append(buf, ' ')
			buf = strconv.AppendInt(buf, t, 10)
		}
	case ExpMetaType:
		if tp, key, err := db.expDecodeMetaKey(k); err != nil {
			return nil, err
		} else {
			buf = append(buf, TypeName[tp]...)
			buf = append(buf, ' ')
			buf = strconv.AppendQuote(buf, String(key))
		}
	default:
		return nil, errInvalidBinLogEvent
	}

	return buf, nil
}
@ -0,0 +1,135 @@ | |||
package config | |||
import ( | |||
"io/ioutil" | |||
"github.com/BurntSushi/toml" | |||
) | |||
// Size is an integer type for size-valued settings.
type Size int

// Default connection and storage settings.
const (
	DefaultAddr     string = "127.0.0.1:6380"
	DefaultHttpAddr string = "127.0.0.1:11181"

	DefaultDBName string = "goleveldb"

	DefaultDataDir string = "./data"
)

// Binlog file limits; see BinLogConfig.Adjust for how they are applied.
const (
	MaxBinLogFileSize int = 1024 * 1024 * 1024
	MaxBinLogFileNum  int = 10000

	DefaultBinLogFileSize int = MaxBinLogFileSize
	DefaultBinLogFileNum  int = 10
)

// LevelDBConfig holds goleveldb backend tuning options.
type LevelDBConfig struct {
	Compression     bool `toml:"compression"`
	BlockSize       int  `toml:"block_size"`
	WriteBufferSize int  `toml:"write_buffer_size"`
	CacheSize       int  `toml:"cache_size"`
	MaxOpenFiles    int  `toml:"max_open_files"`
}

// LMDBConfig holds LMDB backend tuning options.
type LMDBConfig struct {
	MapSize int  `toml:"map_size"`
	NoSync  bool `toml:"nosync"`
}

// BinLogConfig controls binlog rotation; zero values disable the binlog.
type BinLogConfig struct {
	MaxFileSize int `toml:"max_file_size"`
	MaxFileNum  int `toml:"max_file_num"`
}

// Config is the top-level nodb configuration, decodable from TOML.
type Config struct {
	DataDir string `toml:"data_dir"`

	DBName string `toml:"db_name"`

	LevelDB LevelDBConfig `toml:"leveldb"`

	LMDB LMDBConfig `toml:"lmdb"`

	BinLog BinLogConfig `toml:"binlog"`

	SlaveOf string `toml:"slaveof"`

	AccessLog string `toml:"access_log"`
}
// NewConfigWithFile loads a TOML config file from fileName on top of
// the defaults.
func NewConfigWithFile(fileName string) (*Config, error) {
	data, err := ioutil.ReadFile(fileName)
	if err != nil {
		return nil, err
	}

	return NewConfigWithData(data)
}

// NewConfigWithData decodes TOML data on top of the default config, so
// unspecified keys keep their default values.
func NewConfigWithData(data []byte) (*Config, error) {
	cfg := NewConfigDefault()

	_, err := toml.Decode(string(data), cfg)
	if err != nil {
		return nil, err
	}

	return cfg, nil
}
func NewConfigDefault() *Config { | |||
cfg := new(Config) | |||
cfg.DataDir = DefaultDataDir | |||
cfg.DBName = DefaultDBName | |||
// disable binlog | |||
cfg.BinLog.MaxFileNum = 0 | |||
cfg.BinLog.MaxFileSize = 0 | |||
// disable replication | |||
cfg.SlaveOf = "" | |||
// disable access log | |||
cfg.AccessLog = "" | |||
cfg.LMDB.MapSize = 20 * 1024 * 1024 | |||
cfg.LMDB.NoSync = true | |||
return cfg | |||
} | |||
// Adjust replaces unset or invalid LevelDB settings with sane minimums.
func (cfg *LevelDBConfig) Adjust() {
	if cfg.CacheSize <= 0 {
		cfg.CacheSize = 4 * 1024 * 1024
	}

	if cfg.BlockSize <= 0 {
		cfg.BlockSize = 4 * 1024
	}

	if cfg.WriteBufferSize <= 0 {
		cfg.WriteBufferSize = 4 * 1024 * 1024
	}

	if cfg.MaxOpenFiles < 1024 {
		cfg.MaxOpenFiles = 1024
	}
}
func (cfg *BinLogConfig) Adjust() { | |||
if cfg.MaxFileSize <= 0 { | |||
cfg.MaxFileSize = DefaultBinLogFileSize | |||
} else if cfg.MaxFileSize > MaxBinLogFileSize { | |||
cfg.MaxFileSize = MaxBinLogFileSize | |||
} | |||
if cfg.MaxFileNum <= 0 { | |||
cfg.MaxFileNum = DefaultBinLogFileNum | |||
} else if cfg.MaxFileNum > MaxBinLogFileNum { | |||
cfg.MaxFileNum = MaxBinLogFileNum | |||
} | |||
} |
@ -0,0 +1,98 @@ | |||
package nodb | |||
import ( | |||
"errors" | |||
) | |||
// Type bytes tagging every encoded key with the data structure it
// belongs to. Values above maxDataType are bookkeeping (expiration) keys.
const (
	NoneType byte = 0
	KVType byte = 1

	HashType byte = 2
	HSizeType byte = 3

	ListType byte = 4
	LMetaType byte = 5

	ZSetType byte = 6
	ZSizeType byte = 7
	ZScoreType byte = 8

	BitType byte = 9
	BitMetaType byte = 10

	SetType byte = 11
	SSizeType byte = 12

	maxDataType byte = 100

	ExpTimeType byte = 101
	ExpMetaType byte = 102
)

// TypeName maps a type byte to its human-readable name (used when
// formatting binlog events).
var (
	TypeName = map[byte]string{
		KVType: "kv",
		HashType: "hash",
		HSizeType: "hsize",
		ListType: "list",
		LMetaType: "lmeta",
		ZSetType: "zset",
		ZSizeType: "zsize",
		ZScoreType: "zscore",
		BitType: "bit",
		BitMetaType: "bitmeta",
		SetType: "set",
		SSizeType: "ssize",
		ExpTimeType: "exptime",
		ExpMetaType: "expmeta",
	}
)

const (
	defaultScanCount int = 10
)

// Validation errors for key/value/member size limits.
var (
	errKeySize = errors.New("invalid key size")
	errValueSize = errors.New("invalid value size")
	errHashFieldSize = errors.New("invalid hash field size")
	errSetMemberSize = errors.New("invalid set member size")
	errZSetMemberSize = errors.New("invalid zset member size")
	errExpireValue = errors.New("invalid expire value")
)

const (
	//we don't support too many databases
	MaxDBNumber uint8 = 16

	//max key size
	MaxKeySize int = 1024

	//max hash field size
	MaxHashFieldSize int = 1024

	//max zset member size
	MaxZSetMemberSize int = 1024

	//max set member size
	MaxSetMemberSize int = 1024

	//max value size
	MaxValueSize int = 10 * 1024 * 1024
)

var (
	ErrScoreMiss = errors.New("zset score miss")
)

// Binlog event type bytes; see encodeBinLogPut/encodeBinLogDelete.
const (
	BinLogTypeDeletion uint8 = 0x0
	BinLogTypePut uint8 = 0x1
	BinLogTypeCommand uint8 = 0x2
)

// DB commit modes.
const (
	DBAutoCommit uint8 = 0x0
	DBInTransaction uint8 = 0x1
	DBInMulti uint8 = 0x2
)

var (
	Version = "0.1"
)
@ -0,0 +1,61 @@ | |||
// Package nodb is a high-performance embedded NoSQL database.
// | |||
// nodb supports various data structure like kv, list, hash and zset like redis. | |||
// | |||
// Other features include binlog replication, data with a limited time-to-live. | |||
// | |||
// Usage | |||
// | |||
// First create a nodb instance before use: | |||
// | |||
// l := nodb.Open(cfg) | |||
// | |||
// cfg is a Config instance which contains configuration for nodb use, | |||
// like DataDir (root directory for nodb working to store data). | |||
// | |||
// After you create a nodb instance, you can select a DB to store your data:
// | |||
// db, _ := l.Select(0) | |||
// | |||
// DB must be selected by an index; nodb supports only 16 databases, so the index range is [0-15].
// | |||
// KV | |||
// | |||
// KV is the most basic nodb type like any other key-value database. | |||
// | |||
// err := db.Set(key, value) | |||
// value, err := db.Get(key) | |||
// | |||
// List | |||
// | |||
// List is simply lists of values, sorted by insertion order. | |||
// You can push or pop value on the list head (left) or tail (right). | |||
// | |||
// err := db.LPush(key, value1) | |||
// err := db.RPush(key, value2) | |||
// value1, err := db.LPop(key) | |||
// value2, err := db.RPop(key) | |||
// | |||
// Hash | |||
// | |||
// Hash is a map between fields and values. | |||
// | |||
// n, err := db.HSet(key, field1, value1) | |||
// n, err := db.HSet(key, field2, value2) | |||
// value1, err := db.HGet(key, field1) | |||
// value2, err := db.HGet(key, field2) | |||
// | |||
// ZSet | |||
// | |||
// ZSet is a sorted collections of values. | |||
// Every member of a zset is associated with a score, an int64 value used to sort members from smallest to greatest score.
// Members are unique, but scores may be the same.
// | |||
// n, err := db.ZAdd(key, ScorePair{score1, member1}, ScorePair{score2, member2}) | |||
// ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1) | |||
// | |||
// Binlog | |||
// | |||
// nodb supports binlog, so you can sync binlog to another server for replication. If you want to open binlog support, set UseBinLog to true in config. | |||
// | |||
package nodb |
@ -0,0 +1,200 @@ | |||
package nodb | |||
import ( | |||
"bufio" | |||
"bytes" | |||
"encoding/binary" | |||
"io" | |||
"os" | |||
"github.com/siddontang/go-snappy/snappy" | |||
) | |||
//dump format | |||
// fileIndex(bigendian int64)|filePos(bigendian int64) | |||
// |keylen(bigendian int32)|key|valuelen(bigendian int32)|value...... | |||
// | |||
//key and value are both compressed for fast transfer dump on network using snappy | |||
type BinLogAnchor struct { | |||
LogFileIndex int64 | |||
LogPos int64 | |||
} | |||
func (m *BinLogAnchor) WriteTo(w io.Writer) error { | |||
if err := binary.Write(w, binary.BigEndian, m.LogFileIndex); err != nil { | |||
return err | |||
} | |||
if err := binary.Write(w, binary.BigEndian, m.LogPos); err != nil { | |||
return err | |||
} | |||
return nil | |||
} | |||
func (m *BinLogAnchor) ReadFrom(r io.Reader) error { | |||
err := binary.Read(r, binary.BigEndian, &m.LogFileIndex) | |||
if err != nil { | |||
return err | |||
} | |||
err = binary.Read(r, binary.BigEndian, &m.LogPos) | |||
if err != nil { | |||
return err | |||
} | |||
return nil | |||
} | |||
// DumpFile writes a full database snapshot to the file at path.
func (l *Nodb) DumpFile(path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	return l.Dump(f)
}
// Dump streams a snapshot of the whole store to w while holding the
// write lock: a BinLogAnchor header (current binlog position, zero if
// the binlog is disabled) followed by snappy-compressed key/value
// records framed as uint16 key length | key | uint32 value length | value.
func (l *Nodb) Dump(w io.Writer) error {
	m := new(BinLogAnchor)

	var err error

	l.wLock.Lock()
	defer l.wLock.Unlock()

	if l.binlog != nil {
		m.LogFileIndex = l.binlog.LogFileIndex()
		m.LogPos = l.binlog.LogFilePos()
	}

	wb := bufio.NewWriterSize(w, 4096)
	if err = m.WriteTo(wb); err != nil {
		return err
	}

	it := l.ldb.NewIterator()
	it.SeekToFirst()

	compressBuf := make([]byte, 4096)

	var key []byte
	var value []byte
	for ; it.Valid(); it.Next() {
		key = it.RawKey()
		value = it.RawValue()

		// compressBuf is safely reused: wb buffers each write before
		// the next Encode overwrites it
		if key, err = snappy.Encode(compressBuf, key); err != nil {
			return err
		}

		if err = binary.Write(wb, binary.BigEndian, uint16(len(key))); err != nil {
			return err
		}

		if _, err = wb.Write(key); err != nil {
			return err
		}

		if value, err = snappy.Encode(compressBuf, value); err != nil {
			return err
		}

		if err = binary.Write(wb, binary.BigEndian, uint32(len(value))); err != nil {
			return err
		}

		if _, err = wb.Write(value); err != nil {
			return err
		}
	}

	if err = wb.Flush(); err != nil {
		return err
	}

	compressBuf = nil

	return nil
}
// LoadDumpFile restores a snapshot produced by DumpFile and returns the
// binlog anchor recorded in it.
func (l *Nodb) LoadDumpFile(path string) (*BinLogAnchor, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return l.LoadDump(f)
}
// LoadDump restores a snapshot produced by Dump from r and returns the
// binlog anchor recorded in its header. The write lock is held for the
// whole load; existing binlogs are purged afterwards.
func (l *Nodb) LoadDump(r io.Reader) (*BinLogAnchor, error) {
	l.wLock.Lock()
	defer l.wLock.Unlock()

	info := new(BinLogAnchor)

	rb := bufio.NewReaderSize(r, 4096)

	err := info.ReadFrom(rb)
	if err != nil {
		return nil, err
	}

	var keyLen uint16
	var valueLen uint32

	// Reusable staging buffers for the compressed record bodies.
	var keyBuf bytes.Buffer
	var valueBuf bytes.Buffer

	// Scratch buffers reused by snappy.Decode when large enough.
	deKeyBuf := make([]byte, 4096)
	deValueBuf := make([]byte, 4096)

	var key, value []byte

	for {
		// io.EOF on the key-length prefix marks a clean end of stream;
		// any other error (or EOF mid-record below) is fatal.
		if err = binary.Read(rb, binary.BigEndian, &keyLen); err != nil && err != io.EOF {
			return nil, err
		} else if err == io.EOF {
			break
		}

		if _, err = io.CopyN(&keyBuf, rb, int64(keyLen)); err != nil {
			return nil, err
		}

		if key, err = snappy.Decode(deKeyBuf, keyBuf.Bytes()); err != nil {
			return nil, err
		}

		if err = binary.Read(rb, binary.BigEndian, &valueLen); err != nil {
			return nil, err
		}

		if _, err = io.CopyN(&valueBuf, rb, int64(valueLen)); err != nil {
			return nil, err
		}

		if value, err = snappy.Decode(deValueBuf, valueBuf.Bytes()); err != nil {
			return nil, err
		}

		if err = l.ldb.Put(key, value); err != nil {
			return nil, err
		}

		keyBuf.Reset()
		valueBuf.Reset()
	}

	deKeyBuf = nil
	deValueBuf = nil

	//if binlog enable, we will delete all binlogs and open a new one for handling simply
	if l.binlog != nil {
		l.binlog.PurgeAll()
	}

	return info, nil
}
@ -0,0 +1,24 @@ | |||
package nodb | |||
// todo, add info | |||
// type Keyspace struct { | |||
// Kvs int `json:"kvs"` | |||
// KvExpires int `json:"kv_expires"` | |||
// Lists int `json:"lists"` | |||
// ListExpires int `json:"list_expires"` | |||
// Bitmaps int `json:"bitmaps"` | |||
// BitmapExpires int `json:"bitmap_expires"` | |||
// ZSets int `json:"zsets"` | |||
// ZSetExpires int `json:"zset_expires"` | |||
// Hashes int `json:"hashes"` | |||
// HashExpires int `json:"hash_expires"`
// } | |||
// type Info struct { | |||
// KeySpaces [MaxDBNumber]Keyspace | |||
// } |
@ -0,0 +1,73 @@ | |||
package nodb | |||
import ( | |||
"errors" | |||
"fmt" | |||
) | |||
var ( | |||
ErrNestMulti = errors.New("nest multi not supported") | |||
ErrMultiDone = errors.New("multi has been closed") | |||
) | |||
// Multi is a DB view that holds the global write lock until Close,
// so the commands executed through it are not interleaved with other
// writers. Unlike a transaction it cannot roll back.
type Multi struct {
	*DB
}

// IsInMulti reports whether this DB view belongs to an open multi.
func (db *DB) IsInMulti() bool {
	return db.status == DBInMulti
}
// Multi begins a multi to execute commands.
// It blocks any other write operation until you close the multi;
// unlike a transaction, a multi cannot roll back.
func (db *DB) Multi() (*Multi, error) {
	if db.IsInMulti() {
		return nil, ErrNestMulti
	}

	m := new(Multi)

	// The multi gets its own DB view sharing the parent's store and index.
	m.DB = new(DB)
	m.DB.status = DBInMulti

	m.DB.l = db.l

	// Hold the store-wide write lock until Close releases it.
	m.l.wLock.Lock()

	m.DB.sdb = db.sdb
	m.DB.bucket = db.sdb

	m.DB.index = db.index

	m.DB.kvBatch = m.newBatch()
	m.DB.listBatch = m.newBatch()
	m.DB.hashBatch = m.newBatch()
	m.DB.zsetBatch = m.newBatch()
	m.DB.binBatch = m.newBatch()
	m.DB.setBatch = m.newBatch()

	return m, nil
}
// newBatch builds a batch backed by a no-op locker: the multi already
// holds the global write lock, so per-batch locking is unnecessary.
func (m *Multi) newBatch() *batch {
	return m.l.newBatch(m.bucket.NewWriteBatch(), &multiBatchLocker{}, nil)
}
func (m *Multi) Close() error { | |||
if m.bucket == nil { | |||
return ErrMultiDone | |||
} | |||
m.l.wLock.Unlock() | |||
m.bucket = nil | |||
return nil | |||
} | |||
func (m *Multi) Select(index int) error { | |||
if index < 0 || index >= int(MaxDBNumber) { | |||
return fmt.Errorf("invalid db index %d", index) | |||
} | |||
m.DB.index = uint8(index) | |||
return nil | |||
} |
@ -0,0 +1,128 @@ | |||
package nodb | |||
import ( | |||
"fmt" | |||
"sync" | |||
"time" | |||
"github.com/lunny/log" | |||
"github.com/lunny/nodb/config" | |||
"github.com/lunny/nodb/store" | |||
) | |||
// Nodb is the top-level store: a fixed set of logical databases backed
// by one key-value store, with optional binlog support and a background
// expiration job.
type Nodb struct {
	cfg *config.Config

	ldb *store.DB          // underlying key-value store
	dbs [MaxDBNumber]*DB   // logical databases, indexed 0..MaxDBNumber-1

	quit chan struct{}     // closed by Close to stop background jobs
	jobs *sync.WaitGroup   // tracks background goroutines

	binlog *BinLog         // nil when binlog is disabled

	wLock      sync.RWMutex //allow one write at same time
	commitLock sync.Mutex   //allow one write commit at same time
}
func Open(cfg *config.Config) (*Nodb, error) { | |||
if len(cfg.DataDir) == 0 { | |||
cfg.DataDir = config.DefaultDataDir | |||
} | |||
ldb, err := store.Open(cfg) | |||
if err != nil { | |||
return nil, err | |||
} | |||
l := new(Nodb) | |||
l.quit = make(chan struct{}) | |||
l.jobs = new(sync.WaitGroup) | |||
l.ldb = ldb | |||
if cfg.BinLog.MaxFileNum > 0 && cfg.BinLog.MaxFileSize > 0 { | |||
l.binlog, err = NewBinLog(cfg) | |||
if err != nil { | |||
return nil, err | |||
} | |||
} else { | |||
l.binlog = nil | |||
} | |||
for i := uint8(0); i < MaxDBNumber; i++ { | |||
l.dbs[i] = l.newDB(i) | |||
} | |||
l.activeExpireCycle() | |||
return l, nil | |||
} | |||
func (l *Nodb) Close() { | |||
close(l.quit) | |||
l.jobs.Wait() | |||
l.ldb.Close() | |||
if l.binlog != nil { | |||
l.binlog.Close() | |||
l.binlog = nil | |||
} | |||
} | |||
func (l *Nodb) Select(index int) (*DB, error) { | |||
if index < 0 || index >= int(MaxDBNumber) { | |||
return nil, fmt.Errorf("invalid db index %d", index) | |||
} | |||
return l.dbs[index], nil | |||
} | |||
func (l *Nodb) FlushAll() error { | |||
for index, db := range l.dbs { | |||
if _, err := db.FlushAll(); err != nil { | |||
log.Error("flush db %d error %s", index, err.Error()) | |||
} | |||
} | |||
return nil | |||
} | |||
// DataDB returns the raw underlying store.
// Very dangerous to use: direct access bypasses the locking and batch
// machinery used by the rest of this package.
func (l *Nodb) DataDB() *store.DB {
	return l.ldb
}
func (l *Nodb) activeExpireCycle() { | |||
var executors []*elimination = make([]*elimination, len(l.dbs)) | |||
for i, db := range l.dbs { | |||
executors[i] = db.newEliminator() | |||
} | |||
l.jobs.Add(1) | |||
go func() { | |||
tick := time.NewTicker(1 * time.Second) | |||
end := false | |||
done := make(chan struct{}) | |||
for !end { | |||
select { | |||
case <-tick.C: | |||
go func() { | |||
for _, eli := range executors { | |||
eli.active() | |||
} | |||
done <- struct{}{} | |||
}() | |||
<-done | |||
case <-l.quit: | |||
end = true | |||
break | |||
} | |||
} | |||
tick.Stop() | |||
l.jobs.Done() | |||
}() | |||
} |
@ -0,0 +1,171 @@ | |||
package nodb | |||
import ( | |||
"fmt" | |||
"sync" | |||
"github.com/lunny/nodb/store" | |||
) | |||
// ibucket is the key-value surface a DB reads and writes through; both
// *store.DB and transactional views satisfy it.
type ibucket interface {
	Get(key []byte) ([]byte, error)

	Put(key []byte, value []byte) error
	Delete(key []byte) error

	NewIterator() *store.Iterator

	NewWriteBatch() store.WriteBatch

	RangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator
	RevRangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator
	RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator
	RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator
}

// DB is one logical database inside a Nodb store. Each data type keeps
// its own write batch; status distinguishes auto-commit from multi mode.
type DB struct {
	l *Nodb

	sdb *store.DB

	bucket ibucket

	index uint8 // logical database number, 0..MaxDBNumber-1

	kvBatch   *batch
	listBatch *batch
	hashBatch *batch
	zsetBatch *batch
	binBatch  *batch
	setBatch  *batch

	status uint8 // DBAutoCommit or DBInMulti
}
func (l *Nodb) newDB(index uint8) *DB { | |||
d := new(DB) | |||
d.l = l | |||
d.sdb = l.ldb | |||
d.bucket = d.sdb | |||
d.status = DBAutoCommit | |||
d.index = index | |||
d.kvBatch = d.newBatch() | |||
d.listBatch = d.newBatch() | |||
d.hashBatch = d.newBatch() | |||
d.zsetBatch = d.newBatch() | |||
d.binBatch = d.newBatch() | |||
d.setBatch = d.newBatch() | |||
return d | |||
} | |||
// newBatch builds a write batch guarded by a per-batch mutex plus the
// store-wide write lock.
func (db *DB) newBatch() *batch {
	return db.l.newBatch(db.bucket.NewWriteBatch(), &dbBatchLocker{l: &sync.Mutex{}, wrLock: &db.l.wLock}, nil)
}

// Index returns this database's logical index.
func (db *DB) Index() int {
	return int(db.index)
}

// IsAutoCommit reports whether the database commits writes immediately
// (i.e. it is not inside a multi).
func (db *DB) IsAutoCommit() bool {
	return db.status == DBAutoCommit
}
func (db *DB) FlushAll() (drop int64, err error) { | |||
all := [...](func() (int64, error)){ | |||
db.flush, | |||
db.lFlush, | |||
db.hFlush, | |||
db.zFlush, | |||
db.bFlush, | |||
db.sFlush} | |||
for _, flush := range all { | |||
if n, e := flush(); e != nil { | |||
err = e | |||
return | |||
} else { | |||
drop += n | |||
} | |||
} | |||
return | |||
} | |||
// newEliminator builds the expiration executor for this database,
// registering one (batch, delete-func) pair per data type.
func (db *DB) newEliminator() *elimination {
	eliminator := newEliminator(db)

	eliminator.regRetireContext(KVType, db.kvBatch, db.delete)
	eliminator.regRetireContext(ListType, db.listBatch, db.lDelete)
	eliminator.regRetireContext(HashType, db.hashBatch, db.hDelete)
	eliminator.regRetireContext(ZSetType, db.zsetBatch, db.zDelete)
	eliminator.regRetireContext(BitType, db.binBatch, db.bDelete)
	eliminator.regRetireContext(SetType, db.setBatch, db.sDelete)

	return eliminator
}
func (db *DB) flushRegion(t *batch, minKey []byte, maxKey []byte) (drop int64, err error) { | |||
it := db.bucket.RangeIterator(minKey, maxKey, store.RangeROpen) | |||
for ; it.Valid(); it.Next() { | |||
t.Delete(it.RawKey()) | |||
drop++ | |||
if drop&1023 == 0 { | |||
if err = t.Commit(); err != nil { | |||
return | |||
} | |||
} | |||
} | |||
it.Close() | |||
return | |||
} | |||
func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) { | |||
var deleteFunc func(t *batch, key []byte) int64 | |||
var metaDataType byte | |||
switch dataType { | |||
case KVType: | |||
deleteFunc = db.delete | |||
metaDataType = KVType | |||
case ListType: | |||
deleteFunc = db.lDelete | |||
metaDataType = LMetaType | |||
case HashType: | |||
deleteFunc = db.hDelete | |||
metaDataType = HSizeType | |||
case ZSetType: | |||
deleteFunc = db.zDelete | |||
metaDataType = ZSizeType | |||
case BitType: | |||
deleteFunc = db.bDelete | |||
metaDataType = BitMetaType | |||
case SetType: | |||
deleteFunc = db.sDelete | |||
metaDataType = SSizeType | |||
default: | |||
return 0, fmt.Errorf("invalid data type: %s", TypeName[dataType]) | |||
} | |||
var keys [][]byte | |||
keys, err = db.scan(metaDataType, nil, 1024, false, "") | |||
for len(keys) != 0 || err != nil { | |||
for _, key := range keys { | |||
deleteFunc(t, key) | |||
db.rmExpire(t, dataType, key) | |||
} | |||
if err = t.Commit(); err != nil { | |||
return | |||
} else { | |||
drop += int64(len(keys)) | |||
} | |||
keys, err = db.scan(metaDataType, nil, 1024, false, "") | |||
} | |||
return | |||
} |
@ -0,0 +1,312 @@ | |||
package nodb | |||
import ( | |||
"bufio" | |||
"bytes" | |||
"errors" | |||
"io" | |||
"os" | |||
"time" | |||
"github.com/lunny/log" | |||
"github.com/lunny/nodb/store/driver" | |||
) | |||
const (
	// maxReplBatchNum bounds how many binlog batches one ReadEventsTo
	// call streams; maxReplLogSize bounds the bytes streamed.
	maxReplBatchNum = 100
	maxReplLogSize  = 1 * 1024 * 1024
)

var (
	// ErrSkipEvent tells ReadEventFromReader to ignore the current
	// event and continue with the next one.
	ErrSkipEvent = errors.New("skip to next event")
)
var (
	// Fix: "binglog" -> "binlog" in the (log-only) error message.
	errInvalidBinLogEvent = errors.New("invalid binlog event")
	errInvalidBinLogFile  = errors.New("invalid binlog file")
)
// replBatch accumulates replicated events and the write batch that
// applies them; Commit applies both atomically with respect to other
// commits via l.commitLock.
type replBatch struct {
	wb driver.IWriteBatch

	events [][]byte // raw binlog events, re-logged locally on Commit
	l      *Nodb

	lastHead *BinLogHead // head of the batch currently being collected
}
func (b *replBatch) Commit() error { | |||
b.l.commitLock.Lock() | |||
defer b.l.commitLock.Unlock() | |||
err := b.wb.Commit() | |||
if err != nil { | |||
b.Rollback() | |||
return err | |||
} | |||
if b.l.binlog != nil { | |||
if err = b.l.binlog.Log(b.events...); err != nil { | |||
b.Rollback() | |||
return err | |||
} | |||
} | |||
b.events = [][]byte{} | |||
b.lastHead = nil | |||
return nil | |||
} | |||
// Rollback discards the buffered writes and collected events.
func (b *replBatch) Rollback() error {
	b.wb.Rollback()
	b.events = [][]byte{}
	b.lastHead = nil
	return nil
}
func (l *Nodb) replicateEvent(b *replBatch, event []byte) error { | |||
if len(event) == 0 { | |||
return errInvalidBinLogEvent | |||
} | |||
b.events = append(b.events, event) | |||
logType := uint8(event[0]) | |||
switch logType { | |||
case BinLogTypePut: | |||
return l.replicatePutEvent(b, event) | |||
case BinLogTypeDeletion: | |||
return l.replicateDeleteEvent(b, event) | |||
default: | |||
return errInvalidBinLogEvent | |||
} | |||
} | |||
func (l *Nodb) replicatePutEvent(b *replBatch, event []byte) error { | |||
key, value, err := decodeBinLogPut(event) | |||
if err != nil { | |||
return err | |||
} | |||
b.wb.Put(key, value) | |||
return nil | |||
} | |||
func (l *Nodb) replicateDeleteEvent(b *replBatch, event []byte) error { | |||
key, err := decodeBinLogDelete(event) | |||
if err != nil { | |||
return err | |||
} | |||
b.wb.Delete(key) | |||
return nil | |||
} | |||
func ReadEventFromReader(rb io.Reader, f func(head *BinLogHead, event []byte) error) error { | |||
head := &BinLogHead{} | |||
var err error | |||
for { | |||
if err = head.Read(rb); err != nil { | |||
if err == io.EOF { | |||
break | |||
} else { | |||
return err | |||
} | |||
} | |||
var dataBuf bytes.Buffer | |||
if _, err = io.CopyN(&dataBuf, rb, int64(head.PayloadLen)); err != nil { | |||
return err | |||
} | |||
err = f(head, dataBuf.Bytes()) | |||
if err != nil && err != ErrSkipEvent { | |||
return err | |||
} | |||
} | |||
return nil | |||
} | |||
// ReplicateFromReader applies a stream of binlog events read from rb,
// committing whenever an event starts a new batch and once more at the
// end of the stream. Per-event failures are logged and skipped.
//
// NOTE(review): b.lastHead aliases the single head struct that
// ReadEventFromReader mutates in place, so InSameBatch compares the
// header with itself — the mid-stream commit branch appears never to
// fire; confirm whether this is intended.
// NOTE(review): log.Fatal in most loggers terminates the process, which
// would make the subsequent `return ErrSkipEvent` unreachable — verify
// lunny/log's Fatal semantics.
func (l *Nodb) ReplicateFromReader(rb io.Reader) error {
	b := new(replBatch)

	b.wb = l.ldb.NewWriteBatch()
	b.l = l

	f := func(head *BinLogHead, event []byte) error {
		if b.lastHead == nil {
			b.lastHead = head
		} else if !b.lastHead.InSameBatch(head) {
			if err := b.Commit(); err != nil {
				log.Fatal("replication error %s, skip to next", err.Error())
				return ErrSkipEvent
			}
			b.lastHead = head
		}

		err := l.replicateEvent(b, event)
		if err != nil {
			log.Fatal("replication error %s, skip to next", err.Error())
			return ErrSkipEvent
		}
		return nil
	}

	err := ReadEventFromReader(rb, f)
	if err != nil {
		b.Rollback()
		return err
	}
	return b.Commit()
}
func (l *Nodb) ReplicateFromData(data []byte) error { | |||
rb := bytes.NewReader(data) | |||
err := l.ReplicateFromReader(rb) | |||
return err | |||
} | |||
func (l *Nodb) ReplicateFromBinLog(filePath string) error { | |||
f, err := os.Open(filePath) | |||
if err != nil { | |||
return err | |||
} | |||
rb := bufio.NewReaderSize(f, 4096) | |||
err = l.ReplicateFromReader(rb) | |||
f.Close() | |||
return err | |||
} | |||
// ReadEventsToTimeout tries to read events; if none are read it waits
// for a new-event signal for up to timeout seconds, then tries once
// more. Returns the number of bytes written to w.
func (l *Nodb) ReadEventsToTimeout(info *BinLogAnchor, w io.Writer, timeout int) (n int, err error) {
	lastIndex := info.LogFileIndex
	lastPos := info.LogPos

	n = 0
	if l.binlog == nil {
		//binlog not supported
		info.LogFileIndex = 0
		info.LogPos = 0
		return
	}

	n, err = l.ReadEventsTo(info, w)
	// An unchanged anchor means no events were read: block on the
	// binlog's notification channel or the timeout, whichever first.
	if err == nil && info.LogFileIndex == lastIndex && info.LogPos == lastPos {
		//no events read
		select {
		case <-l.binlog.Wait():
		case <-time.After(time.Duration(timeout) * time.Second):
		}
		return l.ReadEventsTo(info, w)
	}
	return
}
// ReadEventsTo streams binlog events starting at the anchor (file
// index + offset) in info to w, advancing the anchor as it goes, and
// returns the number of bytes written. Streaming stops at EOF (rolling
// the anchor to the next log file when one exists) or when the batch /
// size limits are exceeded.
//
// NOTE(review): lastHead aliases the single head struct that
// head.Read mutates in place, so InSameBatch always compares the
// header with itself; the maxReplBatchNum/maxReplLogSize cutoff
// appears never to trigger mid-batch — confirm intended.
func (l *Nodb) ReadEventsTo(info *BinLogAnchor, w io.Writer) (n int, err error) {
	n = 0
	if l.binlog == nil {
		//binlog not supported
		info.LogFileIndex = 0
		info.LogPos = 0
		return
	}

	index := info.LogFileIndex
	offset := info.LogPos

	filePath := l.binlog.FormatLogFilePath(index)

	var f *os.File
	f, err = os.Open(filePath)
	if os.IsNotExist(err) {
		lastIndex := l.binlog.LogFileIndex()

		if index == lastIndex {
			//no binlog at all
			info.LogPos = 0
		} else {
			//slave binlog info had lost
			info.LogFileIndex = -1
		}
	}

	if err != nil {
		// A missing file is not an error for the caller; the anchor
		// above already tells the slave how to proceed.
		if os.IsNotExist(err) {
			err = nil
		}
		return
	}

	defer f.Close()

	var fileSize int64
	st, _ := f.Stat()
	fileSize = st.Size()

	// Anchor already at the end of this log file: nothing to stream.
	if fileSize == info.LogPos {
		return
	}

	if _, err = f.Seek(offset, os.SEEK_SET); err != nil {
		//may be invalid seek offset
		return
	}

	var lastHead *BinLogHead = nil

	head := &BinLogHead{}

	batchNum := 0

	for {
		if err = head.Read(f); err != nil {
			if err == io.EOF {
				//we will try to use next binlog
				if index < l.binlog.LogFileIndex() {
					info.LogFileIndex += 1
					info.LogPos = 0
				}
				err = nil
				return
			} else {
				return
			}
		}

		if lastHead == nil {
			lastHead = head
			batchNum++
		} else if !lastHead.InSameBatch(head) {
			lastHead = head
			batchNum++
			if batchNum > maxReplBatchNum || n > maxReplLogSize {
				return
			}
		}

		if err = head.Write(w); err != nil {
			return
		}

		if _, err = io.CopyN(w, f, int64(head.PayloadLen)); err != nil {
			return
		}

		n += (head.Len() + int(head.PayloadLen))

		info.LogPos = info.LogPos + int64(head.Len()) + int64(head.PayloadLen)
	}

	return
}
@ -0,0 +1,144 @@ | |||
package nodb | |||
import ( | |||
"bytes" | |||
"errors" | |||
"regexp" | |||
"github.com/lunny/nodb/store" | |||
) | |||
// errDataType reports an unknown data-type byte; errMetaKey reports an
// encoded meta key that does not match the expected index/type prefix.
var errDataType = errors.New("error data type")
var errMetaKey = errors.New("error meta key")

// Seek search the prefix key
func (db *DB) Seek(key []byte) (*store.Iterator, error) {
	return db.seek(KVType, key)
}
func (db *DB) seek(dataType byte, key []byte) (*store.Iterator, error) { | |||
var minKey []byte | |||
var err error | |||
if len(key) > 0 { | |||
if err = checkKeySize(key); err != nil { | |||
return nil, err | |||
} | |||
if minKey, err = db.encodeMetaKey(dataType, key); err != nil { | |||
return nil, err | |||
} | |||
} else { | |||
if minKey, err = db.encodeMinKey(dataType); err != nil { | |||
return nil, err | |||
} | |||
} | |||
it := db.bucket.NewIterator() | |||
it.Seek(minKey) | |||
return it, nil | |||
} | |||
// MaxKey returns the exclusive upper bound for KV meta keys.
func (db *DB) MaxKey() ([]byte, error) {
	return db.encodeMaxKey(KVType)
}

// Key decodes the iterator's current encoded KV meta key back to the
// user-visible key.
func (db *DB) Key(it *store.Iterator) ([]byte, error) {
	return db.decodeMetaKey(KVType, it.Key())
}
func (db *DB) scan(dataType byte, key []byte, count int, inclusive bool, match string) ([][]byte, error) { | |||
var minKey, maxKey []byte | |||
var err error | |||
var r *regexp.Regexp | |||
if len(match) > 0 { | |||
if r, err = regexp.Compile(match); err != nil { | |||
return nil, err | |||
} | |||
} | |||
if len(key) > 0 { | |||
if err = checkKeySize(key); err != nil { | |||
return nil, err | |||
} | |||
if minKey, err = db.encodeMetaKey(dataType, key); err != nil { | |||
return nil, err | |||
} | |||
} else { | |||
if minKey, err = db.encodeMinKey(dataType); err != nil { | |||
return nil, err | |||
} | |||
} | |||
if maxKey, err = db.encodeMaxKey(dataType); err != nil { | |||
return nil, err | |||
} | |||
if count <= 0 { | |||
count = defaultScanCount | |||
} | |||
v := make([][]byte, 0, count) | |||
it := db.bucket.NewIterator() | |||
it.Seek(minKey) | |||
if !inclusive { | |||
if it.Valid() && bytes.Equal(it.RawKey(), minKey) { | |||
it.Next() | |||
} | |||
} | |||
for i := 0; it.Valid() && i < count && bytes.Compare(it.RawKey(), maxKey) < 0; it.Next() { | |||
if k, err := db.decodeMetaKey(dataType, it.Key()); err != nil { | |||
continue | |||
} else if r != nil && !r.Match(k) { | |||
continue | |||
} else { | |||
v = append(v, k) | |||
i++ | |||
} | |||
} | |||
it.Close() | |||
return v, nil | |||
} | |||
// encodeMinKey returns the smallest possible meta key for dataType.
func (db *DB) encodeMinKey(dataType byte) ([]byte, error) {
	return db.encodeMetaKey(dataType, nil)
}

// encodeMaxKey returns an exclusive upper bound for dataType's meta
// keys by bumping the type byte of the empty-key encoding.
// Relies on encodeMetaKey(dataType, nil) ending in the type byte.
func (db *DB) encodeMaxKey(dataType byte) ([]byte, error) {
	k, err := db.encodeMetaKey(dataType, nil)
	if err != nil {
		return nil, err
	}
	k[len(k)-1] = dataType + 1
	return k, nil
}

// encodeMetaKey dispatches to the per-type key encoder for dataType,
// or returns errDataType for an unknown type byte.
func (db *DB) encodeMetaKey(dataType byte, key []byte) ([]byte, error) {
	switch dataType {
	case KVType:
		return db.encodeKVKey(key), nil
	case LMetaType:
		return db.lEncodeMetaKey(key), nil
	case HSizeType:
		return db.hEncodeSizeKey(key), nil
	case ZSizeType:
		return db.zEncodeSizeKey(key), nil
	case BitMetaType:
		return db.bEncodeMetaKey(key), nil
	case SSizeType:
		return db.sEncodeSizeKey(key), nil
	default:
		return nil, errDataType
	}
}
func (db *DB) decodeMetaKey(dataType byte, ek []byte) ([]byte, error) { | |||
if len(ek) < 2 || ek[0] != db.index || ek[1] != dataType { | |||
return nil, errMetaKey | |||
} | |||
return ek[2:], nil | |||
} |
@ -0,0 +1,61 @@ | |||
package store | |||
import ( | |||
"github.com/lunny/nodb/store/driver" | |||
) | |||
// DB wraps a driver.IDB, adding iterator/snapshot/transaction helpers
// with package-level types.
type DB struct {
	driver.IDB
}
func (db *DB) NewIterator() *Iterator { | |||
it := new(Iterator) | |||
it.it = db.IDB.NewIterator() | |||
return it | |||
} | |||
// NewWriteBatch returns a driver-level write batch for this store.
func (db *DB) NewWriteBatch() WriteBatch {
	return db.IDB.NewWriteBatch()
}
func (db *DB) NewSnapshot() (*Snapshot, error) { | |||
var err error | |||
s := &Snapshot{} | |||
if s.ISnapshot, err = db.IDB.NewSnapshot(); err != nil { | |||
return nil, err | |||
} | |||
return s, nil | |||
} | |||
// RangeIterator iterates [min, max] per rangeType, forward, unlimited.
func (db *DB) RangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator {
	return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1})
}

// RevRangeIterator iterates the range in reverse, unlimited.
func (db *DB) RevRangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator {
	return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1})
}

//count < 0, unlimit.
//
//offset must >= 0, if < 0, will get nothing.
func (db *DB) RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator {
	return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count})
}

//count < 0, unlimit.
//
//offset must >= 0, if < 0, will get nothing.
func (db *DB) RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator {
	return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count})
}
func (db *DB) Begin() (*Tx, error) { | |||
tx, err := db.IDB.Begin() | |||
if err != nil { | |||
return nil, err | |||
} | |||
return &Tx{tx}, nil | |||
} |
@ -0,0 +1,39 @@ | |||
package driver | |||
// BatchPuter applies an ordered list of writes in one operation.
type BatchPuter interface {
	BatchPut([]Write) error
}

// Write is one pending operation: a nil Value marks a delete.
type Write struct {
	Key   []byte
	Value []byte
}

// WriteBatch buffers writes and flushes them through a BatchPuter.
type WriteBatch struct {
	batch BatchPuter
	wb    []Write
}
func (w *WriteBatch) Put(key, value []byte) { | |||
if value == nil { | |||
value = []byte{} | |||
} | |||
w.wb = append(w.wb, Write{key, value}) | |||
} | |||
// Delete queues a deletion, encoded as a Write with a nil Value.
func (w *WriteBatch) Delete(key []byte) {
	w.wb = append(w.wb, Write{key, nil})
}

// Commit flushes the queued writes through the underlying BatchPuter.
// Note: the queue is not cleared here; callers rely on BatchPut/Rollback.
func (w *WriteBatch) Commit() error {
	return w.batch.BatchPut(w.wb)
}

// Rollback drops the queued writes, keeping the backing array.
func (w *WriteBatch) Rollback() error {
	w.wb = w.wb[0:0]
	return nil
}
func NewWriteBatch(puter BatchPuter) IWriteBatch { | |||
return &WriteBatch{puter, []Write{}} | |||
} |
@ -0,0 +1,67 @@ | |||
package driver | |||
import ( | |||
"errors" | |||
) | |||
var (
	// ErrTxSupport is returned by Begin on drivers without transactions.
	ErrTxSupport = errors.New("transaction is not supported")
)

// IDB is the minimal key-value backend contract every driver implements.
type IDB interface {
	Close() error

	Get(key []byte) ([]byte, error)

	Put(key []byte, value []byte) error
	Delete(key []byte) error

	NewIterator() IIterator

	NewWriteBatch() IWriteBatch

	NewSnapshot() (ISnapshot, error)

	Begin() (Tx, error)
}

// ISnapshot is a read-only, point-in-time view of the store.
type ISnapshot interface {
	Get(key []byte) ([]byte, error)
	NewIterator() IIterator
	Close()
}

// IIterator walks keys in order; Valid reports whether the cursor
// currently points at an entry.
type IIterator interface {
	Close() error

	First()
	Last()
	Seek(key []byte)

	Next()
	Prev()

	Valid() bool

	Key() []byte
	Value() []byte
}

// IWriteBatch accumulates puts/deletes and applies them atomically on
// Commit; Rollback discards them.
type IWriteBatch interface {
	Put(key []byte, value []byte)
	Delete(key []byte)
	Commit() error
	Rollback() error
}

// Tx is an optional transactional view; drivers without support return
// ErrTxSupport from Begin.
type Tx interface {
	Get(key []byte) ([]byte, error)
	Put(key []byte, value []byte) error
	Delete(key []byte) error

	NewIterator() IIterator
	NewWriteBatch() IWriteBatch

	Commit() error
	Rollback() error
}
@ -0,0 +1,46 @@ | |||
package driver | |||
import ( | |||
"fmt" | |||
"github.com/lunny/nodb/config" | |||
) | |||
// Store is a named storage backend factory: it opens or repairs a
// database at a path using the given configuration.
type Store interface {
	String() string
	Open(path string, cfg *config.Config) (IDB, error)
	Repair(path string, cfg *config.Config) error
}

// dbs is the global registry of backends, keyed by Store.String().
var dbs = map[string]Store{}
func Register(s Store) { | |||
name := s.String() | |||
if _, ok := dbs[name]; ok { | |||
panic(fmt.Errorf("store %s is registered", s)) | |||
} | |||
dbs[name] = s | |||
} | |||
func ListStores() []string { | |||
s := []string{} | |||
for k, _ := range dbs { | |||
s = append(s, k) | |||
} | |||
return s | |||
} | |||
func GetStore(cfg *config.Config) (Store, error) { | |||
if len(cfg.DBName) == 0 { | |||
cfg.DBName = config.DefaultDBName | |||
} | |||
s, ok := dbs[cfg.DBName] | |||
if !ok { | |||
return nil, fmt.Errorf("store %s is not registered", cfg.DBName) | |||
} | |||
return s, nil | |||
} |
@ -0,0 +1,27 @@ | |||
package goleveldb | |||
import ( | |||
"github.com/syndtr/goleveldb/leveldb" | |||
) | |||
// WriteBatch adapts a goleveldb batch to driver.IWriteBatch.
type WriteBatch struct {
	db     *DB
	wbatch *leveldb.Batch
}

// Put queues a key/value write.
func (w *WriteBatch) Put(key, value []byte) {
	w.wbatch.Put(key, value)
}

// Delete queues a deletion.
func (w *WriteBatch) Delete(key []byte) {
	w.wbatch.Delete(key)
}

// Commit applies the queued operations atomically to the database.
func (w *WriteBatch) Commit() error {
	return w.db.db.Write(w.wbatch, nil)
}

// Rollback discards the queued operations.
func (w *WriteBatch) Rollback() error {
	w.wbatch.Reset()
	return nil
}
@ -0,0 +1,4 @@ | |||
package goleveldb | |||
// DBName and MemDBName are the registry names of the on-disk and
// in-memory goleveldb backends.
const DBName = "goleveldb"
const MemDBName = "memory"
@ -0,0 +1,187 @@ | |||
package goleveldb | |||
import ( | |||
"github.com/syndtr/goleveldb/leveldb" | |||
"github.com/syndtr/goleveldb/leveldb/cache" | |||
"github.com/syndtr/goleveldb/leveldb/filter" | |||
"github.com/syndtr/goleveldb/leveldb/opt" | |||
"github.com/syndtr/goleveldb/leveldb/storage" | |||
"github.com/lunny/nodb/config" | |||
"github.com/lunny/nodb/store/driver" | |||
"os" | |||
) | |||
// defaultFilterBits is the bloom-filter bits-per-key used by every DB.
const defaultFilterBits int = 10

// Store is the on-disk goleveldb backend.
type Store struct {
}

func (s Store) String() string {
	return DBName
}

// MemStore is the in-memory goleveldb backend.
type MemStore struct {
}

func (s MemStore) String() string {
	return MemDBName
}

// DB wraps a goleveldb database with its derived options.
type DB struct {
	path string

	cfg *config.LevelDBConfig

	db *leveldb.DB

	opts *opt.Options

	iteratorOpts *opt.ReadOptions

	// cache/filter: not assigned anywhere in this view — presumably
	// legacy fields; verify before relying on them.
	cache cache.Cache

	filter filter.Filter
}
func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) { | |||
if err := os.MkdirAll(path, os.ModePerm); err != nil { | |||
return nil, err | |||
} | |||
db := new(DB) | |||
db.path = path | |||
db.cfg = &cfg.LevelDB | |||
db.initOpts() | |||
var err error | |||
db.db, err = leveldb.OpenFile(db.path, db.opts) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return db, nil | |||
} | |||
func (s Store) Repair(path string, cfg *config.Config) error { | |||
db, err := leveldb.RecoverFile(path, newOptions(&cfg.LevelDB)) | |||
if err != nil { | |||
return err | |||
} | |||
db.Close() | |||
return nil | |||
} | |||
func (s MemStore) Open(path string, cfg *config.Config) (driver.IDB, error) { | |||
db := new(DB) | |||
db.path = path | |||
db.cfg = &cfg.LevelDB | |||
db.initOpts() | |||
var err error | |||
db.db, err = leveldb.Open(storage.NewMemStorage(), db.opts) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return db, nil | |||
} | |||
// Repair is a no-op for the in-memory backend.
func (s MemStore) Repair(path string, cfg *config.Config) error {
	return nil
}

// initOpts derives the open options and read options from db.cfg.
// Iterators skip the block cache to avoid polluting it with scans.
func (db *DB) initOpts() {
	db.opts = newOptions(db.cfg)
	db.iteratorOpts = &opt.ReadOptions{}
	db.iteratorOpts.DontFillCache = true
}
func newOptions(cfg *config.LevelDBConfig) *opt.Options { | |||
opts := &opt.Options{} | |||
opts.ErrorIfMissing = false | |||
cfg.Adjust() | |||
//opts.BlockCacher = cache.NewLRU(cfg.CacheSize) | |||
opts.BlockCacheCapacity = cfg.CacheSize | |||
//we must use bloomfilter | |||
opts.Filter = filter.NewBloomFilter(defaultFilterBits) | |||
if !cfg.Compression { | |||
opts.Compression = opt.NoCompression | |||
} else { | |||
opts.Compression = opt.SnappyCompression | |||
} | |||
opts.BlockSize = cfg.BlockSize | |||
opts.WriteBuffer = cfg.WriteBufferSize | |||
return opts | |||
} | |||
// Close closes the underlying goleveldb database.
func (db *DB) Close() error {
	return db.db.Close()
}

// Put writes key/value with default (non-sync) write options.
func (db *DB) Put(key, value []byte) error {
	return db.db.Put(key, value, nil)
}
func (db *DB) Get(key []byte) ([]byte, error) { | |||
v, err := db.db.Get(key, nil) | |||
if err == leveldb.ErrNotFound { | |||
return nil, nil | |||
} | |||
return v, nil | |||
} | |||
// Delete removes key with default (non-sync) write options.
func (db *DB) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}
func (db *DB) NewWriteBatch() driver.IWriteBatch { | |||
wb := &WriteBatch{ | |||
db: db, | |||
wbatch: new(leveldb.Batch), | |||
} | |||
return wb | |||
} | |||
func (db *DB) NewIterator() driver.IIterator { | |||
it := &Iterator{ | |||
db.db.NewIterator(nil, db.iteratorOpts), | |||
} | |||
return it | |||
} | |||
// Begin always fails: goleveldb does not support transactions here.
func (db *DB) Begin() (driver.Tx, error) {
	return nil, driver.ErrTxSupport
}
func (db *DB) NewSnapshot() (driver.ISnapshot, error) { | |||
snapshot, err := db.db.GetSnapshot() | |||
if err != nil { | |||
return nil, err | |||
} | |||
s := &Snapshot{ | |||
db: db, | |||
snp: snapshot, | |||
} | |||
return s, nil | |||
} | |||
// init registers both goleveldb backends with the driver registry.
func init() {
	driver.Register(Store{})
	driver.Register(MemStore{})
}
@ -0,0 +1,49 @@ | |||
package goleveldb | |||
import ( | |||
"github.com/syndtr/goleveldb/leveldb/iterator" | |||
) | |||
// Iterator adapts a goleveldb iterator to driver.IIterator; every
// method delegates directly.
type Iterator struct {
	it iterator.Iterator
}

func (it *Iterator) Key() []byte {
	return it.it.Key()
}

func (it *Iterator) Value() []byte {
	return it.it.Value()
}

// Close releases the underlying iterator; safe to call more than once.
func (it *Iterator) Close() error {
	if it.it != nil {
		it.it.Release()
		it.it = nil
	}
	return nil
}

func (it *Iterator) Valid() bool {
	return it.it.Valid()
}

func (it *Iterator) Next() {
	it.it.Next()
}

func (it *Iterator) Prev() {
	it.it.Prev()
}

func (it *Iterator) First() {
	it.it.First()
}

func (it *Iterator) Last() {
	it.it.Last()
}

func (it *Iterator) Seek(key []byte) {
	it.it.Seek(key)
}
@ -0,0 +1,26 @@ | |||
package goleveldb | |||
import ( | |||
"github.com/lunny/nodb/store/driver" | |||
"github.com/syndtr/goleveldb/leveldb" | |||
) | |||
// Snapshot adapts a goleveldb snapshot to driver.ISnapshot, reading
// with the parent DB's cache-skipping options.
type Snapshot struct {
	db  *DB
	snp *leveldb.Snapshot
}

func (s *Snapshot) Get(key []byte) ([]byte, error) {
	return s.snp.Get(key, s.db.iteratorOpts)
}

func (s *Snapshot) NewIterator() driver.IIterator {
	it := &Iterator{
		s.snp.NewIterator(nil, s.db.iteratorOpts),
	}
	return it
}

// Close releases the snapshot back to the database.
func (s *Snapshot) Close() {
	s.snp.Release()
}
@ -0,0 +1,327 @@ | |||
package store | |||
import ( | |||
"bytes" | |||
"github.com/lunny/nodb/store/driver" | |||
) | |||
const (
	// Iteration directions for range iterators.
	IteratorForward  uint8 = 0
	IteratorBackward uint8 = 1
)

const (
	// Bound openness flags: low nibble = min bound, high nibble = max.
	RangeClose uint8 = 0x00
	RangeLOpen uint8 = 0x01
	RangeROpen uint8 = 0x10
	RangeOpen  uint8 = 0x11
)

// min must less or equal than max
//
// range type:
//
//	close: [min, max]
//	open: (min, max)
//	lopen: (min, max]
//	ropen: [min, max)
type Range struct {
	Min []byte
	Max []byte

	Type uint8
}

// Limit selects a window of results: skip Offset, take Count (< 0
// means unlimited).
type Limit struct {
	Offset int
	Count  int
}

// Iterator wraps a driver iterator with copy/reference accessors.
type Iterator struct {
	it driver.IIterator
}
// Returns a copy of key. | |||
func (it *Iterator) Key() []byte { | |||
k := it.it.Key() | |||
if k == nil { | |||
return nil | |||
} | |||
return append([]byte{}, k...) | |||
} | |||
// Returns a copy of value. | |||
func (it *Iterator) Value() []byte { | |||
v := it.it.Value() | |||
if v == nil { | |||
return nil | |||
} | |||
return append([]byte{}, v...) | |||
} | |||
// Returns a reference of key. | |||
// you must be careful that it will be changed after next iterate. | |||
func (it *Iterator) RawKey() []byte { | |||
return it.it.Key() | |||
} | |||
// Returns a reference of value. | |||
// you must be careful that it will be changed after next iterate. | |||
func (it *Iterator) RawValue() []byte { | |||
return it.it.Value() | |||
} | |||
// Copy key to b, if b len is small or nil, returns a new one. | |||
func (it *Iterator) BufKey(b []byte) []byte { | |||
k := it.RawKey() | |||
if k == nil { | |||
return nil | |||
} | |||
if b == nil { | |||
b = []byte{} | |||
} | |||
b = b[0:0] | |||
return append(b, k...) | |||
} | |||
// Copy value to b, if b len is small or nil, returns a new one. | |||
func (it *Iterator) BufValue(b []byte) []byte { | |||
v := it.RawValue() | |||
if v == nil { | |||
return nil | |||
} | |||
if b == nil { | |||
b = []byte{} | |||
} | |||
b = b[0:0] | |||
return append(b, v...) | |||
} | |||
func (it *Iterator) Close() { | |||
if it.it != nil { | |||
it.it.Close() | |||
it.it = nil | |||
} | |||
} | |||
// Valid reports whether the iterator currently points at an entry.
func (it *Iterator) Valid() bool {
	return it.it.Valid()
}

// Next moves to the following key.
func (it *Iterator) Next() {
	it.it.Next()
}

// Prev moves to the preceding key.
func (it *Iterator) Prev() {
	it.it.Prev()
}

// SeekToFirst positions the iterator at the first key.
func (it *Iterator) SeekToFirst() {
	it.it.First()
}

// SeekToLast positions the iterator at the last key.
func (it *Iterator) SeekToLast() {
	it.it.Last()
}

// Seek positions the iterator relative to key.
// NOTE(review): presumably lands on the first key >= key (the backward
// positioning in rangeLimitIterator relies on this) — confirm with the
// driver contract.
func (it *Iterator) Seek(key []byte) {
	it.it.Seek(key)
}
// Finds by key, if not found, nil returns. | |||
func (it *Iterator) Find(key []byte) []byte { | |||
it.Seek(key) | |||
if it.Valid() { | |||
k := it.RawKey() | |||
if k == nil { | |||
return nil | |||
} else if bytes.Equal(k, key) { | |||
return it.Value() | |||
} | |||
} | |||
return nil | |||
} | |||
// Finds by key, if not found, nil returns, else a reference of value returns. | |||
// you must be careful that it will be changed after next iterate. | |||
func (it *Iterator) RawFind(key []byte) []byte { | |||
it.Seek(key) | |||
if it.Valid() { | |||
k := it.RawKey() | |||
if k == nil { | |||
return nil | |||
} else if bytes.Equal(k, key) { | |||
return it.RawValue() | |||
} | |||
} | |||
return nil | |||
} | |||
// RangeLimitIterator walks keys inside Range r in one direction,
// skipping l.Offset entries up front (done by rangeLimitIterator) and
// stopping after l.Count entries (Count < 0 means unlimited).
type RangeLimitIterator struct {
	it *Iterator
	r *Range
	l *Limit
	// step counts entries already yielded, compared against l.Count.
	step int
	//0 for IteratorForward, 1 for IteratorBackward
	direction uint8
}

// Key returns a copy of the current key.
func (it *RangeLimitIterator) Key() []byte {
	return it.it.Key()
}

// Value returns a copy of the current value.
func (it *RangeLimitIterator) Value() []byte {
	return it.it.Value()
}

// RawKey returns the current key without copying; invalidated by the
// next move.
func (it *RangeLimitIterator) RawKey() []byte {
	return it.it.RawKey()
}

// RawValue returns the current value without copying; invalidated by
// the next move.
func (it *RangeLimitIterator) RawValue() []byte {
	return it.it.RawValue()
}

// BufKey copies the current key into b (see Iterator.BufKey).
func (it *RangeLimitIterator) BufKey(b []byte) []byte {
	return it.it.BufKey(b)
}

// BufValue copies the current value into b (see Iterator.BufValue).
func (it *RangeLimitIterator) BufValue(b []byte) []byte {
	return it.it.BufValue(b)
}

// Valid reports whether iteration may continue: the underlying iterator
// must be valid, the count budget not exhausted, and the current key
// still inside the bound for the travel direction.
func (it *RangeLimitIterator) Valid() bool {
	if it.l.Offset < 0 {
		// Negative offset means "yield nothing" by convention.
		return false
	} else if !it.it.Valid() {
		return false
	} else if it.l.Count >= 0 && it.step >= it.l.Count {
		return false
	}
	if it.direction == IteratorForward {
		// Moving forward only the Max bound can terminate us.
		if it.r.Max != nil {
			r := bytes.Compare(it.it.RawKey(), it.r.Max)
			if it.r.Type&RangeROpen > 0 {
				return !(r >= 0)
			} else {
				return !(r > 0)
			}
		}
	} else {
		// Moving backward only the Min bound can terminate us.
		if it.r.Min != nil {
			r := bytes.Compare(it.it.RawKey(), it.r.Min)
			if it.r.Type&RangeLOpen > 0 {
				return !(r <= 0)
			} else {
				return !(r < 0)
			}
		}
	}
	return true
}

// Next advances one step in the iterator's travel direction.
func (it *RangeLimitIterator) Next() {
	it.step++
	if it.direction == IteratorForward {
		it.it.Next()
	} else {
		it.it.Prev()
	}
}

// Close releases the underlying iterator.
func (it *RangeLimitIterator) Close() {
	it.it.Close()
}
func NewRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator { | |||
return rangeLimitIterator(i, r, l, IteratorForward) | |||
} | |||
func NewRevRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator { | |||
return rangeLimitIterator(i, r, l, IteratorBackward) | |||
} | |||
func NewRangeIterator(i *Iterator, r *Range) *RangeLimitIterator { | |||
return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorForward) | |||
} | |||
func NewRevRangeIterator(i *Iterator, r *Range) *RangeLimitIterator { | |||
return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorBackward) | |||
} | |||
// rangeLimitIterator positions i at the first in-range entry for the
// given direction (honoring open bounds), consumes l.Offset entries,
// and wraps everything in a RangeLimitIterator.
func rangeLimitIterator(i *Iterator, r *Range, l *Limit, direction uint8) *RangeLimitIterator {
	it := new(RangeLimitIterator)
	it.it = i
	it.r = r
	it.l = l
	it.direction = direction
	it.step = 0
	if l.Offset < 0 {
		// Negative offset yields nothing; skip positioning entirely.
		return it
	}
	if direction == IteratorForward {
		if r.Min == nil {
			it.it.SeekToFirst()
		} else {
			it.it.Seek(r.Min)
			// Left-open range: step past an exact Min match.
			if r.Type&RangeLOpen > 0 {
				if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Min) {
					it.it.Next()
				}
			}
		}
	} else {
		if r.Max == nil {
			it.it.SeekToLast()
		} else {
			// Seek lands at/after Max; back up until we are at Max
			// itself or at the last key below it.
			it.it.Seek(r.Max)
			if !it.it.Valid() {
				it.it.SeekToLast()
			} else {
				if !bytes.Equal(it.it.RawKey(), r.Max) {
					it.it.Prev()
				}
			}
			// Right-open range: step past an exact Max match.
			if r.Type&RangeROpen > 0 {
				if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Max) {
					it.it.Prev()
				}
			}
		}
	}
	// Consume the requested offset before handing the iterator over.
	for i := 0; i < l.Offset; i++ {
		if it.it.Valid() {
			if it.direction == IteratorForward {
				it.it.Next()
			} else {
				it.it.Prev()
			}
		}
	}
	return it
}
@ -0,0 +1,16 @@ | |||
package store | |||
import ( | |||
"github.com/lunny/nodb/store/driver" | |||
) | |||
type Snapshot struct { | |||
driver.ISnapshot | |||
} | |||
func (s *Snapshot) NewIterator() *Iterator { | |||
it := new(Iterator) | |||
it.it = s.ISnapshot.NewIterator() | |||
return it | |||
} |
@ -0,0 +1,51 @@ | |||
package store | |||
import ( | |||
"fmt" | |||
"os" | |||
"path" | |||
"github.com/lunny/nodb/config" | |||
"github.com/lunny/nodb/store/driver" | |||
_ "github.com/lunny/nodb/store/goleveldb" | |||
) | |||
// getStorePath returns the on-disk directory for cfg's database:
// <DataDir>/<DBName>_data.
func getStorePath(cfg *config.Config) string {
	return path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DBName))
}
func Open(cfg *config.Config) (*DB, error) { | |||
s, err := driver.GetStore(cfg) | |||
if err != nil { | |||
return nil, err | |||
} | |||
path := getStorePath(cfg) | |||
if err := os.MkdirAll(path, os.ModePerm); err != nil { | |||
return nil, err | |||
} | |||
idb, err := s.Open(path, cfg) | |||
if err != nil { | |||
return nil, err | |||
} | |||
db := &DB{idb} | |||
return db, nil | |||
} | |||
// Repair asks cfg's driver to repair the store at its data path.
// NOTE(review): unlike Open, this does not create the directory first —
// presumably the store already exists when repairing.
func Repair(cfg *config.Config) error {
	s, err := driver.GetStore(cfg)
	if err != nil {
		return err
	}
	path := getStorePath(cfg)
	return s.Repair(path, cfg)
}

// init is intentionally empty; drivers register themselves in their own
// package init (see the blank goleveldb import above).
func init() {
}
@ -0,0 +1,42 @@ | |||
package store | |||
import ( | |||
"github.com/lunny/nodb/store/driver" | |||
) | |||
type Tx struct { | |||
driver.Tx | |||
} | |||
func (tx *Tx) NewIterator() *Iterator { | |||
it := new(Iterator) | |||
it.it = tx.Tx.NewIterator() | |||
return it | |||
} | |||
func (tx *Tx) NewWriteBatch() WriteBatch { | |||
return tx.Tx.NewWriteBatch() | |||
} | |||
func (tx *Tx) RangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator { | |||
return NewRangeLimitIterator(tx.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1}) | |||
} | |||
func (tx *Tx) RevRangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator { | |||
return NewRevRangeLimitIterator(tx.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1}) | |||
} | |||
//count < 0, unlimit. | |||
// | |||
//offset must >= 0, if < 0, will get nothing. | |||
func (tx *Tx) RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator { | |||
return NewRangeLimitIterator(tx.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count}) | |||
} | |||
//count < 0, unlimit. | |||
// | |||
//offset must >= 0, if < 0, will get nothing. | |||
func (tx *Tx) RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator { | |||
return NewRevRangeLimitIterator(tx.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count}) | |||
} |
@ -0,0 +1,9 @@ | |||
package store | |||
import ( | |||
"github.com/lunny/nodb/store/driver" | |||
) | |||
// WriteBatch batches mutations for atomic commit; it simply re-exports
// the driver-level batch interface at the store level.
type WriteBatch interface {
	driver.IWriteBatch
}
@ -0,0 +1,922 @@ | |||
package nodb | |||
import ( | |||
"encoding/binary" | |||
"errors" | |||
"sort" | |||
"time" | |||
"github.com/lunny/nodb/store" | |||
) | |||
// Bitwise operations accepted by BOperation.
const (
	OPand uint8 = iota + 1
	OPor
	OPxor
	OPnot
)

// BitPair is one (position, bit value) request for BMSetBit.
type BitPair struct {
	Pos int32
	Val uint8
}

// segBitInfo is a BitPair resolved to a segment sequence and an
// in-segment bit offset.
type segBitInfo struct {
	Seq uint32
	Off uint32
	Val uint8
}

// segBitInfoArray sorts pending bit writes by (Seq, Off); see Less.
type segBitInfoArray []segBitInfo

// Bitmaps are stored as fixed-size segments of segByteSize bytes,
// keyed by their sequence number.
const (
	// byte
	segByteWidth uint32 = 9
	segByteSize uint32 = 1 << segByteWidth
	// bit
	segBitWidth uint32 = segByteWidth + 3
	segBitSize uint32 = segByteSize << 3
	// maxByteSize caps a whole bitmap at 8 MB.
	maxByteSize uint32 = 8 << 20
	maxSegCount uint32 = maxByteSize / segByteSize
	// minSeq/maxSeq bound the encodable segment range for key scans.
	minSeq uint32 = 0
	maxSeq uint32 = uint32((maxByteSize << 3) - 1)
)
// bitsInByte[b] is the population count (number of set bits) of byte b.
var bitsInByte = [256]int32{0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3,
	4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3,
	3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4,
	5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4,
	3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4,
	5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2,
	2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3,
	4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4,
	5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6,
	6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5,
	6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8}

// fillBits[i] has the low i+1 bits set (1, 3, 7, ... 255).
var fillBits = [...]uint8{1, 3, 7, 15, 31, 63, 127, 255}

// emptySegment is a shared all-zero segment; treat it as read-only.
var emptySegment []byte = make([]byte, segByteSize, segByteSize)

// fillSegment is a shared all-ones segment; treat it as read-only.
var fillSegment []byte = func() []byte {
	data := make([]byte, segByteSize, segByteSize)
	for i := uint32(0); i < segByteSize; i++ {
		data[i] = 0xff
	}
	return data
}()

// Errors returned by the bitmap operations.
var errBinKey = errors.New("invalid bin key")
var errOffset = errors.New("invalid offset")
var errDuplicatePos = errors.New("duplicate bit pos")
// getBit reads the bit at the given bit offset within sz (bit 0 is the
// least-significant bit of sz[0]). Out-of-range offsets read as 0.
func getBit(sz []byte, offset uint32) uint8 {
	idx := offset / 8
	if idx >= uint32(len(sz)) {
		// Out of range: treat as an unset bit rather than an error.
		return 0
	}
	bit := offset % 8
	return (sz[idx] >> bit) & 1
}
// setBit writes val (0 or 1) at the given bit offset within sz,
// modifying sz in place only when the stored bit actually differs.
// Returns false when val is not a bit or offset falls outside sz.
func setBit(sz []byte, offset uint32, val uint8) bool {
	if val > 1 {
		// Only 0 or 1 are valid bit values.
		return false
	}
	idx := offset / 8
	if idx >= uint32(len(sz)) {
		return false
	}
	bit := offset % 8
	if (sz[idx]>>bit)&1 != val {
		sz[idx] ^= 1 << bit
	}
	return true
}
func (datas segBitInfoArray) Len() int { | |||
return len(datas) | |||
} | |||
func (datas segBitInfoArray) Less(i, j int) bool { | |||
res := (datas)[i].Seq < (datas)[j].Seq | |||
if !res && (datas)[i].Seq == (datas)[j].Seq { | |||
res = (datas)[i].Off < (datas)[j].Off | |||
} | |||
return res | |||
} | |||
func (datas segBitInfoArray) Swap(i, j int) { | |||
datas[i], datas[j] = datas[j], datas[i] | |||
} | |||
func (db *DB) bEncodeMetaKey(key []byte) []byte { | |||
mk := make([]byte, len(key)+2) | |||
mk[0] = db.index | |||
mk[1] = BitMetaType | |||
copy(mk[2:], key) | |||
return mk | |||
} | |||
func (db *DB) bDecodeMetaKey(bkey []byte) ([]byte, error) { | |||
if len(bkey) < 2 || bkey[0] != db.index || bkey[1] != BitMetaType { | |||
return nil, errBinKey | |||
} | |||
return bkey[2:], nil | |||
} | |||
// bEncodeBinKey builds a segment key:
// index | BitType | key length (2 bytes BE) | key | seq (4 bytes BE).
func (db *DB) bEncodeBinKey(key []byte, seq uint32) []byte {
	bk := make([]byte, len(key)+8)
	pos := 0
	bk[pos] = db.index
	pos++
	bk[pos] = BitType
	pos++
	binary.BigEndian.PutUint16(bk[pos:], uint16(len(key)))
	pos += 2
	copy(bk[pos:], key)
	pos += len(key)
	binary.BigEndian.PutUint32(bk[pos:], seq)
	return bk
}

// bDecodeBinKey splits a segment key back into the user key and seq.
// NOTE(review): the type byte (bkey[1]) is not verified here, unlike
// bDecodeMetaKey; callers only feed it keys from BitType ranges.
func (db *DB) bDecodeBinKey(bkey []byte) (key []byte, seq uint32, err error) {
	if len(bkey) < 8 || bkey[0] != db.index {
		err = errBinKey
		return
	}
	keyLen := binary.BigEndian.Uint16(bkey[2:4])
	// Total length must be exactly header(4) + key + seq(4).
	if int(keyLen+8) != len(bkey) {
		err = errBinKey
		return
	}
	key = bkey[4 : 4+keyLen]
	seq = uint32(binary.BigEndian.Uint32(bkey[4+keyLen:]))
	return
}
func (db *DB) bCapByteSize(seq uint32, off uint32) uint32 { | |||
var offByteSize uint32 = (off >> 3) + 1 | |||
if offByteSize > segByteSize { | |||
offByteSize = segByteSize | |||
} | |||
return seq<<segByteWidth + offByteSize | |||
} | |||
// bParseOffset resolves a (possibly negative) bit offset into a segment
// sequence and an in-segment bit offset. A negative offset counts back
// from the current tail bit (-1 is the highest bit ever set); it fails
// with errOffset when it reaches before bit 0.
func (db *DB) bParseOffset(key []byte, offset int32) (seq uint32, off uint32, err error) {
	if offset < 0 {
		if tailSeq, tailOff, e := db.bGetMeta(key); e != nil {
			err = e
			return
		} else if tailSeq >= 0 {
			// Translate relative to (tail bit position + 1).
			offset += int32((uint32(tailSeq)<<segBitWidth | uint32(tailOff)) + 1)
			if offset < 0 {
				err = errOffset
				return
			}
		}
	}
	// Split the absolute bit position into segment and in-segment bit.
	off = uint32(offset)
	seq = off >> segBitWidth
	off &= (segBitSize - 1)
	return
}
// bGetMeta loads the tail position (highest segment seq and bit offset
// ever set) for key; both are -1 when the key holds no bitmap.
// NOTE(review): assumes a stored meta value is at least 8 bytes —
// bSetMeta always writes 8, but this is not re-validated here.
func (db *DB) bGetMeta(key []byte) (tailSeq int32, tailOff int32, err error) {
	var v []byte
	mk := db.bEncodeMetaKey(key)
	v, err = db.bucket.Get(mk)
	if err != nil {
		return
	}
	if v != nil {
		tailSeq = int32(binary.LittleEndian.Uint32(v[0:4]))
		tailOff = int32(binary.LittleEndian.Uint32(v[4:8]))
	} else {
		tailSeq = -1
		tailOff = -1
	}
	return
}

// bSetMeta queues key's meta record (tail seq then tail off, both LE
// uint32) into batch t.
func (db *DB) bSetMeta(t *batch, key []byte, tailSeq uint32, tailOff uint32) {
	ek := db.bEncodeMetaKey(key)
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint32(buf[0:4], tailSeq)
	binary.LittleEndian.PutUint32(buf[4:8], tailOff)
	t.Put(ek, buf)
	return
}
// bUpdateMeta advances the stored tail to (seq, off) when that position
// lies beyond the current tail, or when no meta exists yet. The
// resulting tail is returned either way.
func (db *DB) bUpdateMeta(t *batch, key []byte, seq uint32, off uint32) (tailSeq uint32, tailOff uint32, err error) {
	var tseq, toff int32
	var update bool = false
	if tseq, toff, err = db.bGetMeta(key); err != nil {
		return
	} else if tseq < 0 {
		// No meta yet: always record the new position.
		update = true
	} else {
		tailSeq = uint32(MaxInt32(tseq, 0))
		tailOff = uint32(MaxInt32(toff, 0))
		update = (seq > tailSeq || (seq == tailSeq && off > tailOff))
	}
	if update {
		db.bSetMeta(t, key, seq, off)
		tailSeq = seq
		tailOff = off
	}
	return
}
func (db *DB) bDelete(t *batch, key []byte) (drop int64) { | |||
mk := db.bEncodeMetaKey(key) | |||
t.Delete(mk) | |||
minKey := db.bEncodeBinKey(key, minSeq) | |||
maxKey := db.bEncodeBinKey(key, maxSeq) | |||
it := db.bucket.RangeIterator(minKey, maxKey, store.RangeClose) | |||
for ; it.Valid(); it.Next() { | |||
t.Delete(it.RawKey()) | |||
drop++ | |||
} | |||
it.Close() | |||
return drop | |||
} | |||
// bGetSegment fetches the stored segment for (key, seq). The encoded
// bin key is always returned; segment is nil when it does not exist.
func (db *DB) bGetSegment(key []byte, seq uint32) ([]byte, []byte, error) {
	bk := db.bEncodeBinKey(key, seq)
	segment, err := db.bucket.Get(bk)
	if err != nil {
		return bk, nil, err
	}
	return bk, segment, nil
}

// bAllocateSegment is bGetSegment plus allocation: a missing segment is
// replaced by a fresh zeroed one (not yet persisted).
func (db *DB) bAllocateSegment(key []byte, seq uint32) ([]byte, []byte, error) {
	bk, segment, err := db.bGetSegment(key, seq)
	if err == nil && segment == nil {
		segment = make([]byte, segByteSize, segByteSize)
	}
	return bk, segment, err
}

// bIterator walks all stored data segments of key in seq order.
func (db *DB) bIterator(key []byte) *store.RangeLimitIterator {
	sk := db.bEncodeBinKey(key, minSeq)
	ek := db.bEncodeBinKey(key, maxSeq)
	return db.bucket.RangeIterator(sk, ek, store.RangeClose)
}
func (db *DB) bSegAnd(a []byte, b []byte, res *[]byte) { | |||
if a == nil || b == nil { | |||
*res = nil | |||
return | |||
} | |||
data := *res | |||
if data == nil { | |||
data = make([]byte, segByteSize, segByteSize) | |||
*res = data | |||
} | |||
for i := uint32(0); i < segByteSize; i++ { | |||
data[i] = a[i] & b[i] | |||
} | |||
return | |||
} | |||
func (db *DB) bSegOr(a []byte, b []byte, res *[]byte) { | |||
if a == nil || b == nil { | |||
if a == nil && b == nil { | |||
*res = nil | |||
} else if a == nil { | |||
*res = b | |||
} else { | |||
*res = a | |||
} | |||
return | |||
} | |||
data := *res | |||
if data == nil { | |||
data = make([]byte, segByteSize, segByteSize) | |||
*res = data | |||
} | |||
for i := uint32(0); i < segByteSize; i++ { | |||
data[i] = a[i] | b[i] | |||
} | |||
return | |||
} | |||
func (db *DB) bSegXor(a []byte, b []byte, res *[]byte) { | |||
if a == nil && b == nil { | |||
*res = fillSegment | |||
return | |||
} | |||
if a == nil { | |||
a = emptySegment | |||
} | |||
if b == nil { | |||
b = emptySegment | |||
} | |||
data := *res | |||
if data == nil { | |||
data = make([]byte, segByteSize, segByteSize) | |||
*res = data | |||
} | |||
for i := uint32(0); i < segByteSize; i++ { | |||
data[i] = a[i] ^ b[i] | |||
} | |||
return | |||
} | |||
// bExpireAt registers the absolute expiration time when for key's
// bitmap and commits it. Returns (0, nil) when the key has no bitmap,
// (1, nil) on success.
func (db *DB) bExpireAt(key []byte, when int64) (int64, error) {
	t := db.binBatch
	t.Lock()
	defer t.Unlock()
	if seq, _, err := db.bGetMeta(key); err != nil || seq < 0 {
		return 0, err
	} else {
		db.expireAt(t, BitType, key, when)
		if err := t.Commit(); err != nil {
			return 0, err
		}
	}
	return 1, nil
}
// bCountByte counts the set bits of val restricted to bit positions
// [soff, eoff] (each 0-7; swapped when out of order). Bits outside the
// window are masked away, then the remainder is a bitsInByte lookup.
func (db *DB) bCountByte(val byte, soff uint32, eoff uint32) int32 {
	if soff > eoff {
		soff, eoff = eoff, soff
	}
	mask := uint8(0)
	if soff > 0 {
		// Bits below soff.
		mask |= fillBits[soff-1]
	}
	if eoff < 7 {
		// Bits above eoff.
		mask |= (fillBits[7] ^ fillBits[eoff])
	}
	// Invert: keep only bits inside [soff, eoff].
	mask = fillBits[7] ^ mask
	return bitsInByte[val&mask]
}
func (db *DB) bCountSeg(key []byte, seq uint32, soff uint32, eoff uint32) (cnt int32, err error) { | |||
if soff >= segBitSize || soff < 0 || | |||
eoff >= segBitSize || eoff < 0 { | |||
return | |||
} | |||
var segment []byte | |||
if _, segment, err = db.bGetSegment(key, seq); err != nil { | |||
return | |||
} | |||
if segment == nil { | |||
return | |||
} | |||
if soff > eoff { | |||
soff, eoff = eoff, soff | |||
} | |||
headIdx := int(soff >> 3) | |||
endIdx := int(eoff >> 3) | |||
sByteOff := soff - ((soff >> 3) << 3) | |||
eByteOff := eoff - ((eoff >> 3) << 3) | |||
if headIdx == endIdx { | |||
cnt = db.bCountByte(segment[headIdx], sByteOff, eByteOff) | |||
} else { | |||
cnt = db.bCountByte(segment[headIdx], sByteOff, 7) + | |||
db.bCountByte(segment[endIdx], 0, eByteOff) | |||
} | |||
// sum up following bytes | |||
for idx, end := headIdx+1, endIdx-1; idx <= end; idx += 1 { | |||
cnt += bitsInByte[segment[idx]] | |||
if idx == end { | |||
break | |||
} | |||
} | |||
return | |||
} | |||
// BGet materializes the whole bitmap stored at key as one contiguous
// byte slice; missing segments read back as zero bytes. data is nil
// when the key holds no bitmap.
func (db *DB) BGet(key []byte) (data []byte, err error) {
	if err = checkKeySize(key); err != nil {
		return
	}
	var ts, to int32
	if ts, to, err = db.bGetMeta(key); err != nil || ts < 0 {
		return
	}
	var tailSeq, tailOff = uint32(ts), uint32(to)
	// Size the result from the tail position recorded in the meta.
	var capByteSize uint32 = db.bCapByteSize(tailSeq, tailOff)
	data = make([]byte, capByteSize, capByteSize)
	minKey := db.bEncodeBinKey(key, minSeq)
	maxKey := db.bEncodeBinKey(key, tailSeq)
	it := db.bucket.RangeIterator(minKey, maxKey, store.RangeClose)
	var seq, s, e uint32
	for ; it.Valid(); it.Next() {
		if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil {
			data = nil
			break
		}
		// Copy this segment into its slot of the flat result.
		s = seq << segByteWidth
		e = MinUInt32(s+segByteSize, capByteSize)
		copy(data[s:e], it.RawValue())
	}
	it.Close()
	return
}
func (db *DB) BDelete(key []byte) (drop int64, err error) { | |||
if err = checkKeySize(key); err != nil { | |||
return | |||
} | |||
t := db.binBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
drop = db.bDelete(t, key) | |||
db.rmExpire(t, BitType, key) | |||
err = t.Commit() | |||
return | |||
} | |||
// BSetBit sets the bit at offset to val (0 or 1) and returns the
// previous bit value. Negative offsets count back from the tail (see
// bParseOffset). The tail metadata is advanced when the new position
// lies beyond it.
func (db *DB) BSetBit(key []byte, offset int32, val uint8) (ori uint8, err error) {
	if err = checkKeySize(key); err != nil {
		return
	}
	// todo : check offset
	var seq, off uint32
	if seq, off, err = db.bParseOffset(key, offset); err != nil {
		return 0, err
	}
	var bk, segment []byte
	if bk, segment, err = db.bAllocateSegment(key, seq); err != nil {
		return 0, err
	}
	if segment != nil {
		ori = getBit(segment, off)
		// setBit fails only for an invalid val or out-of-range offset;
		// on success the segment is written back and committed.
		if setBit(segment, off, val) {
			t := db.binBatch
			t.Lock()
			defer t.Unlock()
			t.Put(bk, segment)
			if _, _, e := db.bUpdateMeta(t, key, seq, off); e != nil {
				err = e
				return
			}
			err = t.Commit()
		}
	}
	return
}
// BMSetBit sets multiple bits at once. Requests are sorted by position
// so all writes hitting the same segment are merged into one
// read-modify-write. place counts the positions successfully applied;
// duplicate positions are rejected with errDuplicatePos.
func (db *DB) BMSetBit(key []byte, args ...BitPair) (place int64, err error) {
	if err = checkKeySize(key); err != nil {
		return
	}
	// (ps : so as to aviod wasting memory copy while calling db.Get() and batch.Put(),
	// here we sequence the params by pos, so that we can merge the execution of
	// diff pos setting which targets on the same segment respectively. )
	// #1 : sequence request data
	var argCnt = len(args)
	var bitInfos segBitInfoArray = make(segBitInfoArray, argCnt)
	var seq, off uint32
	for i, info := range args {
		if seq, off, err = db.bParseOffset(key, info.Pos); err != nil {
			return
		}
		bitInfos[i].Seq = seq
		bitInfos[i].Off = off
		bitInfos[i].Val = info.Val
	}
	sort.Sort(bitInfos)
	// After sorting, duplicates are adjacent.
	for i := 1; i < argCnt; i++ {
		if bitInfos[i].Seq == bitInfos[i-1].Seq && bitInfos[i].Off == bitInfos[i-1].Off {
			return 0, errDuplicatePos
		}
	}
	// #2 : execute bit set in order
	t := db.binBatch
	t.Lock()
	defer t.Unlock()
	var curBinKey, curSeg []byte
	// NOTE(review): this local maxSeq shadows the package-level maxSeq
	// constant; here it tracks the highest position applied.
	var curSeq, maxSeq, maxOff uint32
	for _, info := range bitInfos {
		// Moving to a different segment: flush the previous one first.
		if curSeg != nil && info.Seq != curSeq {
			t.Put(curBinKey, curSeg)
			curSeg = nil
		}
		if curSeg == nil {
			curSeq = info.Seq
			if curBinKey, curSeg, err = db.bAllocateSegment(key, info.Seq); err != nil {
				return
			}
			if curSeg == nil {
				continue
			}
		}
		if setBit(curSeg, info.Off, info.Val) {
			maxSeq = info.Seq
			maxOff = info.Off
			place++
		}
	}
	if curSeg != nil {
		t.Put(curBinKey, curSeg)
	}
	// finally, update meta
	if place > 0 {
		if _, _, err = db.bUpdateMeta(t, key, maxSeq, maxOff); err != nil {
			return
		}
		err = t.Commit()
	}
	return
}
func (db *DB) BGetBit(key []byte, offset int32) (uint8, error) { | |||
if seq, off, err := db.bParseOffset(key, offset); err != nil { | |||
return 0, err | |||
} else { | |||
_, segment, err := db.bGetSegment(key, seq) | |||
if err != nil { | |||
return 0, err | |||
} | |||
if segment == nil { | |||
return 0, nil | |||
} else { | |||
return getBit(segment, off), nil | |||
} | |||
} | |||
} | |||
// func (db *DB) BGetRange(key []byte, start int32, end int32) ([]byte, error) { | |||
// section := make([]byte) | |||
// return | |||
// } | |||
// BCount counts the set bits between bit offsets start and end
// (inclusive). Negative offsets count back from the tail, and the
// bounds are swapped when given out of order.
func (db *DB) BCount(key []byte, start int32, end int32) (cnt int32, err error) {
	var sseq, soff uint32
	if sseq, soff, err = db.bParseOffset(key, start); err != nil {
		return
	}
	var eseq, eoff uint32
	if eseq, eoff, err = db.bParseOffset(key, end); err != nil {
		return
	}
	if sseq > eseq || (sseq == eseq && soff > eoff) {
		sseq, eseq = eseq, sseq
		soff, eoff = eoff, soff
	}
	// Partial head and tail segments first.
	var segCnt int32
	if eseq == sseq {
		if segCnt, err = db.bCountSeg(key, sseq, soff, eoff); err != nil {
			return 0, err
		}
		cnt = segCnt
	} else {
		if segCnt, err = db.bCountSeg(key, sseq, soff, segBitSize-1); err != nil {
			return 0, err
		} else {
			cnt += segCnt
		}
		if segCnt, err = db.bCountSeg(key, eseq, 0, eoff); err != nil {
			return 0, err
		} else {
			cnt += segCnt
		}
	}
	// middle segs
	// Whole segments strictly between sseq and eseq (open range).
	var segment []byte
	skey := db.bEncodeBinKey(key, sseq)
	ekey := db.bEncodeBinKey(key, eseq)
	it := db.bucket.RangeIterator(skey, ekey, store.RangeOpen)
	for ; it.Valid(); it.Next() {
		segment = it.RawValue()
		for _, bt := range segment {
			cnt += bitsInByte[bt]
		}
	}
	it.Close()
	return
}
func (db *DB) BTail(key []byte) (int32, error) { | |||
// effective length of data, the highest bit-pos set in history | |||
tailSeq, tailOff, err := db.bGetMeta(key) | |||
if err != nil { | |||
return 0, err | |||
} | |||
tail := int32(-1) | |||
if tailSeq >= 0 { | |||
tail = int32(uint32(tailSeq)<<segBitWidth | uint32(tailOff)) | |||
} | |||
return tail, nil | |||
} | |||
// BOperation applies the bitwise operation op (OPand/OPor/OPxor/OPnot)
// across the bitmaps at srckeys and stores the result at dstkey,
// replacing any previous value there. OPnot requires exactly one
// existing source key; the other ops require at least two.
func (db *DB) BOperation(op uint8, dstkey []byte, srckeys ...[]byte) (blen int32, err error) {
	// blen -
	// the total bit size of data stored in destination key,
	// that is equal to the size of the longest input string.
	var exeOp func([]byte, []byte, *[]byte)
	switch op {
	case OPand:
		exeOp = db.bSegAnd
	case OPor:
		exeOp = db.bSegOr
	case OPxor, OPnot:
		// NOT is computed as XOR against an all-ones bitmap built below.
		exeOp = db.bSegXor
	default:
		return
	}
	if dstkey == nil || srckeys == nil {
		return
	}
	t := db.binBatch
	t.Lock()
	defer t.Unlock()
	var srcKseq, srcKoff int32
	var seq, off, maxDstSeq, maxDstOff uint32
	var keyNum int = len(srckeys)
	var validKeyNum int
	// Pass 1: find the overall tail across existing sources and nil out
	// the missing keys.
	for i := 0; i < keyNum; i++ {
		if srcKseq, srcKoff, err = db.bGetMeta(srckeys[i]); err != nil {
			return
		} else if srcKseq < 0 {
			srckeys[i] = nil
			continue
		}
		validKeyNum++
		seq = uint32(srcKseq)
		off = uint32(srcKoff)
		if seq > maxDstSeq || (seq == maxDstSeq && off > maxDstOff) {
			maxDstSeq = seq
			maxDstOff = off
		}
	}
	if (op == OPnot && validKeyNum != 1) ||
		(op != OPnot && validKeyNum < 2) {
		return // with not enough existing source key
	}
	// Locate the first existing source key.
	var srcIdx int
	for srcIdx = 0; srcIdx < keyNum; srcIdx++ {
		if srckeys[srcIdx] != nil {
			break
		}
	}
	// init - data
	var segments = make([][]byte, maxDstSeq+1)
	if op == OPnot {
		// ps :
		// ( ~num == num ^ 0x11111111 )
		// we init the result segments with all bit set,
		// then we can calculate through the way of 'xor'.
		// ahead segments bin format : 1111 ... 1111
		for i := uint32(0); i < maxDstSeq; i++ {
			segments[i] = fillSegment
		}
		// last segment bin format : 1111..1100..0000
		var tailSeg = make([]byte, segByteSize, segByteSize)
		var fillByte = fillBits[7]
		var tailSegLen = db.bCapByteSize(uint32(0), maxDstOff)
		for i := uint32(0); i < tailSegLen-1; i++ {
			tailSeg[i] = fillByte
		}
		tailSeg[tailSegLen-1] = fillBits[maxDstOff-(tailSegLen-1)<<3]
		segments[maxDstSeq] = tailSeg
	} else {
		// ps : init segments by data corresponding to the 1st valid source key
		it := db.bIterator(srckeys[srcIdx])
		for ; it.Valid(); it.Next() {
			if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil {
				// to do ...
				it.Close()
				return
			}
			segments[seq] = it.Value()
		}
		it.Close()
		srcIdx++
	}
	// operation with following keys
	var res []byte
	for i := srcIdx; i < keyNum; i++ {
		if srckeys[i] == nil {
			continue
		}
		it := db.bIterator(srckeys[i])
		for idx, end := uint32(0), false; !end; it.Next() {
			end = !it.Valid()
			if !end {
				if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil {
					// to do ...
					it.Close()
					return
				}
			} else {
				// Past the last stored segment: flush the remainder.
				seq = maxDstSeq + 1
			}
			// todo :
			// operation 'and' can be optimize here :
			// if seq > max_segments_idx, this loop can be break,
			// which can avoid cost from Key() and bDecodeBinKey()
			// Segments this source is missing are folded in as nil.
			for ; idx < seq; idx++ {
				res = nil
				exeOp(segments[idx], nil, &res)
				segments[idx] = res
			}
			if !end {
				res = it.Value()
				exeOp(segments[seq], res, &res)
				segments[seq] = res
				idx++
			}
		}
		it.Close()
	}
	// clear the old data in case
	db.bDelete(t, dstkey)
	db.rmExpire(t, BitType, dstkey)
	// set data
	db.bSetMeta(t, dstkey, maxDstSeq, maxDstOff)
	var bk []byte
	for seq, segt := range segments {
		if segt != nil {
			bk = db.bEncodeBinKey(dstkey, uint32(seq))
			t.Put(bk, segt)
		}
	}
	err = t.Commit()
	if err == nil {
		// blen = int32(db.bCapByteSize(maxDstOff, maxDstOff))
		blen = int32(maxDstSeq<<segBitWidth | maxDstOff + 1)
	}
	return
}
// BExpire sets the bitmap at key to expire duration seconds from now;
// duration must be positive.
func (db *DB) BExpire(key []byte, duration int64) (int64, error) {
	if duration <= 0 {
		return 0, errExpireValue
	}
	if err := checkKeySize(key); err != nil {
		return -1, err
	}
	return db.bExpireAt(key, time.Now().Unix()+duration)
}

// BExpireAt sets the bitmap at key to expire at unix time when, which
// must lie in the future.
func (db *DB) BExpireAt(key []byte, when int64) (int64, error) {
	if when <= time.Now().Unix() {
		return 0, errExpireValue
	}
	if err := checkKeySize(key); err != nil {
		return -1, err
	}
	return db.bExpireAt(key, when)
}
func (db *DB) BTTL(key []byte) (int64, error) { | |||
if err := checkKeySize(key); err != nil { | |||
return -1, err | |||
} | |||
return db.ttl(BitType, key) | |||
} | |||
func (db *DB) BPersist(key []byte) (int64, error) { | |||
if err := checkKeySize(key); err != nil { | |||
return 0, err | |||
} | |||
t := db.binBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
n, err := db.rmExpire(t, BitType, key) | |||
if err != nil { | |||
return 0, err | |||
} | |||
err = t.Commit() | |||
return n, err | |||
} | |||
func (db *DB) BScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) { | |||
return db.scan(BitMetaType, key, count, inclusive, match) | |||
} | |||
// bFlush drops every bitmap stored in this DB index, returning the
// number of entries removed.
func (db *DB) bFlush() (drop int64, err error) {
	t := db.binBatch
	t.Lock()
	defer t.Unlock()
	return db.flushType(t, BitType)
}
@ -0,0 +1,509 @@ | |||
package nodb | |||
import ( | |||
"encoding/binary" | |||
"errors" | |||
"time" | |||
"github.com/lunny/nodb/store" | |||
) | |||
// FVPair is one hash field/value pair.
type FVPair struct {
	Field []byte
	Value []byte
}

// Errors returned when decoding hash keys.
var errHashKey = errors.New("invalid hash key")
var errHSizeKey = errors.New("invalid hsize key")

const (
	// hashStartSep separates the key from the field inside an encoded
	// hash key; hashStopSep (the next byte value) serves as an
	// exclusive upper bound when scanning a key's fields.
	hashStartSep byte = ':'
	hashStopSep byte = hashStartSep + 1
)
func checkHashKFSize(key []byte, field []byte) error { | |||
if len(key) > MaxKeySize || len(key) == 0 { | |||
return errKeySize | |||
} else if len(field) > MaxHashFieldSize || len(field) == 0 { | |||
return errHashFieldSize | |||
} | |||
return nil | |||
} | |||
// hEncodeSizeKey encodes the meta key holding a hash's field count:
// [index][HSizeType][key].
func (db *DB) hEncodeSizeKey(key []byte) []byte {
	buf := make([]byte, len(key)+2)

	buf[0] = db.index
	buf[1] = HSizeType

	copy(buf[2:], key)
	return buf
}

// hDecodeSizeKey extracts the user key from an encoded hash size key,
// validating the db index and type byte. The result aliases ek.
func (db *DB) hDecodeSizeKey(ek []byte) ([]byte, error) {
	if len(ek) < 2 || ek[0] != db.index || ek[1] != HSizeType {
		return nil, errHSizeKey
	}

	return ek[2:], nil
}

// hEncodeHashKey encodes one hash field entry:
// [index][HashType][2-byte big-endian key length][key][hashStartSep][field].
func (db *DB) hEncodeHashKey(key []byte, field []byte) []byte {
	buf := make([]byte, len(key)+len(field)+1+1+2+1)

	pos := 0
	buf[pos] = db.index
	pos++
	buf[pos] = HashType
	pos++

	binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
	pos += 2

	copy(buf[pos:], key)
	pos += len(key)

	buf[pos] = hashStartSep
	pos++
	copy(buf[pos:], field)

	return buf
}

// hDecodeHashKey splits an encoded hash field entry back into (key,
// field), validating index, type byte, length prefix, and separator.
// Both returned slices alias ek.
func (db *DB) hDecodeHashKey(ek []byte) ([]byte, []byte, error) {
	if len(ek) < 5 || ek[0] != db.index || ek[1] != HashType {
		return nil, nil, errHashKey
	}

	pos := 2
	keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
	pos += 2

	if keyLen+5 > len(ek) {
		return nil, nil, errHashKey
	}

	key := ek[pos : pos+keyLen]
	pos += keyLen

	if ek[pos] != hashStartSep {
		return nil, nil, errHashKey
	}

	pos++
	field := ek[pos:]
	return key, field, nil
}

// hEncodeStartKey returns the inclusive lower bound for scanning all
// fields of key (empty field, hashStartSep terminator).
func (db *DB) hEncodeStartKey(key []byte) []byte {
	return db.hEncodeHashKey(key, nil)
}

// hEncodeStopKey returns the exclusive upper bound for scanning all
// fields of key, by bumping the separator byte to hashStopSep.
func (db *DB) hEncodeStopKey(key []byte) []byte {
	k := db.hEncodeHashKey(key, nil)

	k[len(k)-1] = hashStopSep

	return k
}
// hSetItem buffers a single field write into the hash batch, bumping the
// hash's size counter when the field did not previously exist.
// Returns 1 if the field was newly created, 0 if it overwrote an
// existing value. Caller must hold the hash batch lock and Commit.
func (db *DB) hSetItem(key []byte, field []byte, value []byte) (int64, error) {
	t := db.hashBatch

	ek := db.hEncodeHashKey(key, field)

	var n int64 = 1
	// NOTE(review): the Get error is discarded here; a failed read is
	// treated the same as "field exists" — confirm that is intended.
	if v, _ := db.bucket.Get(ek); v != nil {
		n = 0
	} else {
		if _, err := db.hIncrSize(key, 1); err != nil {
			return 0, err
		}
	}

	t.Put(ek, value)
	return n, nil
}

// ps : here just focus on deleting the hash data,
// any other likes expire is ignore.
// hDelete buffers deletion of every field of the hash plus its size key
// into t, returning the number of fields removed. Caller commits.
func (db *DB) hDelete(t *batch, key []byte) int64 {
	sk := db.hEncodeSizeKey(key)
	start := db.hEncodeStartKey(key)
	stop := db.hEncodeStopKey(key)

	var num int64 = 0
	it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
	for ; it.Valid(); it.Next() {
		t.Delete(it.Key())
		num++
	}
	it.Close()

	t.Delete(sk)
	return num
}
// hExpireAt registers an absolute expiration for the hash at key.
// Returns (0, nil) when the hash is empty or missing, (1, nil) on success.
func (db *DB) hExpireAt(key []byte, when int64) (int64, error) {
	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	if hlen, err := db.HLen(key); err != nil || hlen == 0 {
		return 0, err
	} else {
		db.expireAt(t, HashType, key, when)
		if err := t.Commit(); err != nil {
			return 0, err
		}
	}
	return 1, nil
}

// HLen returns the number of fields in the hash at key, read from the
// hash's size meta key.
func (db *DB) HLen(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	return Int64(db.bucket.Get(db.hEncodeSizeKey(key)))
}

// HSet stores value under field in the hash at key.
// Returns 1 if the field was created, 0 if it was overwritten.
func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) {
	if err := checkHashKFSize(key, field); err != nil {
		return 0, err
	} else if err := checkValueSize(value); err != nil {
		return 0, err
	}

	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	n, err := db.hSetItem(key, field, value)
	if err != nil {
		return 0, err
	}

	//todo add binlog

	err = t.Commit()
	return n, err
}

// HGet returns the value stored under field in the hash at key, or nil
// if the field does not exist.
func (db *DB) HGet(key []byte, field []byte) ([]byte, error) {
	if err := checkHashKFSize(key, field); err != nil {
		return nil, err
	}

	return db.bucket.Get(db.hEncodeHashKey(key, field))
}
// HMset stores multiple field/value pairs in the hash at key in a single
// batch, adjusting the size counter by the number of newly created fields.
func (db *DB) HMset(key []byte, args ...FVPair) error {
	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	var err error
	var ek []byte
	var num int64 = 0
	for i := 0; i < len(args); i++ {
		if err := checkHashKFSize(key, args[i].Field); err != nil {
			return err
		} else if err := checkValueSize(args[i].Value); err != nil {
			return err
		}

		ek = db.hEncodeHashKey(key, args[i].Field)

		// Count only fields that do not already exist, so the size
		// counter stays accurate on overwrites.
		if v, err := db.bucket.Get(ek); err != nil {
			return err
		} else if v == nil {
			num++
		}

		t.Put(ek, args[i].Value)
	}

	if _, err = db.hIncrSize(key, num); err != nil {
		return err
	}

	//todo add binglog
	err = t.Commit()
	return err
}

// HMget returns the values for the given fields of the hash at key, in
// argument order; missing fields yield nil entries.
func (db *DB) HMget(key []byte, args ...[]byte) ([][]byte, error) {
	var ek []byte

	it := db.bucket.NewIterator()
	defer it.Close()

	r := make([][]byte, len(args))
	for i := 0; i < len(args); i++ {
		if err := checkHashKFSize(key, args[i]); err != nil {
			return nil, err
		}

		ek = db.hEncodeHashKey(key, args[i])

		r[i] = it.Find(ek)
	}

	return r, nil
}

// HDel removes the given fields from the hash at key, returning how many
// actually existed and decrementing the size counter accordingly.
func (db *DB) HDel(key []byte, args ...[]byte) (int64, error) {
	t := db.hashBatch

	var ek []byte
	var v []byte
	var err error

	t.Lock()
	defer t.Unlock()

	it := db.bucket.NewIterator()
	defer it.Close()

	var num int64 = 0
	for i := 0; i < len(args); i++ {
		if err := checkHashKFSize(key, args[i]); err != nil {
			return 0, err
		}

		ek = db.hEncodeHashKey(key, args[i])

		v = it.RawFind(ek)
		if v == nil {
			continue
		} else {
			num++
			t.Delete(ek)
		}
	}

	if _, err = db.hIncrSize(key, -num); err != nil {
		return 0, err
	}

	err = t.Commit()

	return num, err
}
// hIncrSize adjusts the stored field count for the hash at key by delta,
// buffering the write into the hash batch. When the count drops to zero
// or below, the size key and any expiration entry are deleted instead.
// Caller must hold the hash batch lock and Commit.
func (db *DB) hIncrSize(key []byte, delta int64) (int64, error) {
	t := db.hashBatch
	sk := db.hEncodeSizeKey(key)

	var err error
	var size int64 = 0
	if size, err = Int64(db.bucket.Get(sk)); err != nil {
		return 0, err
	} else {
		size += delta
		if size <= 0 {
			size = 0
			t.Delete(sk)
			db.rmExpire(t, HashType, key)
		} else {
			t.Put(sk, PutInt64(size))
		}
	}

	return size, nil
}

// HIncrBy interprets the value at field as a decimal integer, adds delta,
// stores the result, and returns the new value.
func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) {
	if err := checkHashKFSize(key, field); err != nil {
		return 0, err
	}

	t := db.hashBatch
	var ek []byte
	var err error

	t.Lock()
	defer t.Unlock()

	ek = db.hEncodeHashKey(key, field)

	var n int64 = 0
	if n, err = StrInt64(db.bucket.Get(ek)); err != nil {
		return 0, err
	}

	n += delta

	_, err = db.hSetItem(key, field, StrPutInt64(n))
	if err != nil {
		return 0, err
	}

	err = t.Commit()

	return n, err
}

// HGetAll returns every field/value pair of the hash at key by scanning
// the hash's encoded key range.
func (db *DB) HGetAll(key []byte) ([]FVPair, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	start := db.hEncodeStartKey(key)
	stop := db.hEncodeStopKey(key)

	v := make([]FVPair, 0, 16)

	it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
	for ; it.Valid(); it.Next() {
		_, f, err := db.hDecodeHashKey(it.Key())
		if err != nil {
			return nil, err
		}

		v = append(v, FVPair{Field: f, Value: it.Value()})
	}

	it.Close()

	return v, nil
}
// HKeys returns all field names of the hash at key.
func (db *DB) HKeys(key []byte) ([][]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	start := db.hEncodeStartKey(key)
	stop := db.hEncodeStopKey(key)

	v := make([][]byte, 0, 16)

	it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
	for ; it.Valid(); it.Next() {
		_, f, err := db.hDecodeHashKey(it.Key())
		if err != nil {
			return nil, err
		}
		v = append(v, f)
	}

	it.Close()

	return v, nil
}

// HValues returns all values of the hash at key. Each entry's key is
// still decoded to validate it before the value is collected.
func (db *DB) HValues(key []byte) ([][]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	start := db.hEncodeStartKey(key)
	stop := db.hEncodeStopKey(key)

	v := make([][]byte, 0, 16)

	it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
	for ; it.Valid(); it.Next() {
		_, _, err := db.hDecodeHashKey(it.Key())
		if err != nil {
			return nil, err
		}

		v = append(v, it.Value())
	}

	it.Close()

	return v, nil
}

// HClear deletes the entire hash at key along with its expiration entry,
// returning the number of fields removed.
func (db *DB) HClear(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	num := db.hDelete(t, key)
	db.rmExpire(t, HashType, key)

	err := t.Commit()
	return num, err
}

// HMclear deletes several hashes in one batch. The returned count is the
// number of keys requested, not the number that actually existed.
func (db *DB) HMclear(keys ...[]byte) (int64, error) {
	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	for _, key := range keys {
		if err := checkKeySize(key); err != nil {
			return 0, err
		}

		db.hDelete(t, key)
		db.rmExpire(t, HashType, key)
	}

	err := t.Commit()
	return int64(len(keys)), err
}

// hFlush drops all hash data in this DB under the hash batch lock.
func (db *DB) hFlush() (drop int64, err error) {
	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	return db.flushType(t, HashType)
}

// HScan iterates hash size (meta) keys starting from key, returning at
// most count keys; if inclusive is true the range starts at key itself.
func (db *DB) HScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
	return db.scan(HSizeType, key, count, inclusive, match)
}
// HExpire sets the hash at key to expire after duration seconds.
func (db *DB) HExpire(key []byte, duration int64) (int64, error) {
	if duration <= 0 {
		return 0, errExpireValue
	}

	return db.hExpireAt(key, time.Now().Unix()+duration)
}

// HExpireAt sets the hash at key to expire at the absolute unix
// timestamp when. Returns errExpireValue if when is not in the future.
func (db *DB) HExpireAt(key []byte, when int64) (int64, error) {
	if when <= time.Now().Unix() {
		return 0, errExpireValue
	}

	return db.hExpireAt(key, when)
}

// HTTL returns the remaining time to live of the hash at key.
func (db *DB) HTTL(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return -1, err
	}

	return db.ttl(HashType, key)
}

// HPersist removes any expiration set on the hash at key.
func (db *DB) HPersist(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	n, err := db.rmExpire(t, HashType, key)
	if err != nil {
		return 0, err
	}

	err = t.Commit()
	return n, err
}
@ -0,0 +1,387 @@ | |||
package nodb | |||
import ( | |||
"errors" | |||
"time" | |||
) | |||
// KVPair is a key/value pair, as accepted by MSet.
type KVPair struct {
	Key   []byte
	Value []byte
}

var errKVKey = errors.New("invalid encode kv key")

// checkKeySize validates that key is non-empty and within MaxKeySize.
func checkKeySize(key []byte) error {
	if len(key) > MaxKeySize || len(key) == 0 {
		return errKeySize
	}
	return nil
}

// checkValueSize validates that value is within MaxValueSize.
// An empty value is allowed.
func checkValueSize(value []byte) error {
	if len(value) > MaxValueSize {
		return errValueSize
	}

	return nil
}
// encodeKVKey prefixes a user key with the db index and KV type byte:
// [index][KVType][key].
func (db *DB) encodeKVKey(key []byte) []byte {
	ek := make([]byte, len(key)+2)
	ek[0] = db.index
	ek[1] = KVType
	copy(ek[2:], key)
	return ek
}

// decodeKVKey strips and validates the [index][KVType] prefix.
// The result aliases ek.
func (db *DB) decodeKVKey(ek []byte) ([]byte, error) {
	if len(ek) < 2 || ek[0] != db.index || ek[1] != KVType {
		return nil, errKVKey
	}

	return ek[2:], nil
}

// encodeKVMinKey returns the smallest possible encoded KV key for this db.
func (db *DB) encodeKVMinKey() []byte {
	ek := db.encodeKVKey(nil)
	return ek
}

// encodeKVMaxKey returns an exclusive upper bound over all encoded KV
// keys for this db, by bumping the type byte.
func (db *DB) encodeKVMaxKey() []byte {
	ek := db.encodeKVKey(nil)
	ek[len(ek)-1] = KVType + 1
	return ek
}
// incr interprets the value at key as a decimal integer, adds delta,
// stores the result, and returns the new value.
func (db *DB) incr(key []byte, delta int64) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	var err error
	key = db.encodeKVKey(key)

	t := db.kvBatch

	t.Lock()
	defer t.Unlock()

	var n int64
	n, err = StrInt64(db.bucket.Get(key))
	if err != nil {
		return 0, err
	}

	n += delta

	t.Put(key, StrPutInt64(n))

	//todo binlog

	err = t.Commit()
	return n, err
}

// ps : here just focus on deleting the key-value data,
// any other likes expire is ignore.
// delete buffers deletion of the encoded key into t; always reports 1.
func (db *DB) delete(t *batch, key []byte) int64 {
	key = db.encodeKVKey(key)
	t.Delete(key)
	return 1
}

// setExpireAt registers an absolute expiration for the KV entry at key.
// Returns (0, nil) when the key does not exist, (1, nil) on success.
func (db *DB) setExpireAt(key []byte, when int64) (int64, error) {
	t := db.kvBatch
	t.Lock()
	defer t.Unlock()

	if exist, err := db.Exists(key); err != nil || exist == 0 {
		return 0, err
	} else {
		db.expireAt(t, KVType, key, when)
		if err := t.Commit(); err != nil {
			return 0, err
		}
	}
	return 1, nil
}
// Decr decrements the integer value at key by one.
func (db *DB) Decr(key []byte) (int64, error) {
	return db.incr(key, -1)
}

// DecrBy decrements the integer value at key by decrement.
func (db *DB) DecrBy(key []byte, decrement int64) (int64, error) {
	return db.incr(key, -decrement)
}

// Del deletes the given keys and their expiration entries in one batch.
// The returned count is the number of keys requested, not the number
// that actually existed.
func (db *DB) Del(keys ...[]byte) (int64, error) {
	if len(keys) == 0 {
		return 0, nil
	}

	codedKeys := make([][]byte, len(keys))
	for i, k := range keys {
		codedKeys[i] = db.encodeKVKey(k)
	}

	t := db.kvBatch
	t.Lock()
	defer t.Unlock()

	for i, k := range keys {
		t.Delete(codedKeys[i])
		db.rmExpire(t, KVType, k)
	}

	err := t.Commit()
	return int64(len(keys)), err
}

// Exists reports whether key exists: 1 if present, 0 otherwise.
func (db *DB) Exists(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	var err error
	key = db.encodeKVKey(key)

	var v []byte
	v, err = db.bucket.Get(key)
	if v != nil && err == nil {
		return 1, nil
	}

	return 0, err
}

// Get returns the value stored at key, or nil if it does not exist.
func (db *DB) Get(key []byte) ([]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	key = db.encodeKVKey(key)

	return db.bucket.Get(key)
}
// GetSet atomically stores value at key and returns the previous value
// (nil if the key did not exist).
func (db *DB) GetSet(key []byte, value []byte) ([]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	} else if err := checkValueSize(value); err != nil {
		return nil, err
	}

	key = db.encodeKVKey(key)

	t := db.kvBatch

	t.Lock()
	defer t.Unlock()

	oldValue, err := db.bucket.Get(key)
	if err != nil {
		return nil, err
	}

	t.Put(key, value)
	//todo, binlog

	err = t.Commit()

	return oldValue, err
}

// Incr increments the integer value at key by one.
func (db *DB) Incr(key []byte) (int64, error) {
	return db.incr(key, 1)
}

// IncrBy increments the integer value at key by increment.
func (db *DB) IncrBy(key []byte, increment int64) (int64, error) {
	return db.incr(key, increment)
}

// MGet returns the values for the given keys in argument order; missing
// keys yield nil entries.
func (db *DB) MGet(keys ...[]byte) ([][]byte, error) {
	values := make([][]byte, len(keys))

	it := db.bucket.NewIterator()
	defer it.Close()

	for i := range keys {
		if err := checkKeySize(keys[i]); err != nil {
			return nil, err
		}

		values[i] = it.Find(db.encodeKVKey(keys[i]))
	}

	return values, nil
}
// MSet stores multiple key/value pairs in a single batch commit.
func (db *DB) MSet(args ...KVPair) error {
	if len(args) == 0 {
		return nil
	}

	t := db.kvBatch

	var err error
	var key []byte
	var value []byte

	t.Lock()
	defer t.Unlock()

	for i := 0; i < len(args); i++ {
		if err := checkKeySize(args[i].Key); err != nil {
			return err
		} else if err := checkValueSize(args[i].Value); err != nil {
			return err
		}

		key = db.encodeKVKey(args[i].Key)

		value = args[i].Value

		t.Put(key, value)

		//todo binlog
	}

	err = t.Commit()
	return err
}

// Set stores value at key, overwriting any existing value.
func (db *DB) Set(key []byte, value []byte) error {
	if err := checkKeySize(key); err != nil {
		return err
	} else if err := checkValueSize(value); err != nil {
		return err
	}

	var err error
	key = db.encodeKVKey(key)

	t := db.kvBatch

	t.Lock()
	defer t.Unlock()

	t.Put(key, value)

	err = t.Commit()

	return err
}
func (db *DB) SetNX(key []byte, value []byte) (int64, error) { | |||
if err := checkKeySize(key); err != nil { | |||
return 0, err | |||
} else if err := checkValueSize(value); err != nil { | |||
return 0, err | |||
} | |||
var err error | |||
key = db.encodeKVKey(key) | |||
var n int64 = 1 | |||
t := db.kvBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
if v, err := db.bucket.Get(key); err != nil { | |||
return 0, err | |||
} else if v != nil { | |||
n = 0 | |||
} else { | |||
t.Put(key, value) | |||
//todo binlog | |||
err = t.Commit() | |||
} | |||
return n, err | |||
} | |||
// flush drops all plain KV data in this DB under the kv batch lock.
func (db *DB) flush() (drop int64, err error) {
	t := db.kvBatch
	t.Lock()
	defer t.Unlock()
	return db.flushType(t, KVType)
}

//if inclusive is true, scan range [key, inf) else (key, inf)
func (db *DB) Scan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
	return db.scan(KVType, key, count, inclusive, match)
}

// Expire sets the KV entry at key to expire after duration seconds.
func (db *DB) Expire(key []byte, duration int64) (int64, error) {
	if duration <= 0 {
		return 0, errExpireValue
	}

	return db.setExpireAt(key, time.Now().Unix()+duration)
}

// ExpireAt sets the KV entry at key to expire at the absolute unix
// timestamp when. Returns errExpireValue if when is not in the future.
func (db *DB) ExpireAt(key []byte, when int64) (int64, error) {
	if when <= time.Now().Unix() {
		return 0, errExpireValue
	}

	return db.setExpireAt(key, when)
}

// TTL returns the remaining time to live of the KV entry at key.
func (db *DB) TTL(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return -1, err
	}

	return db.ttl(KVType, key)
}

// Persist removes any expiration set on the KV entry at key.
func (db *DB) Persist(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	t := db.kvBatch
	t.Lock()
	defer t.Unlock()
	n, err := db.rmExpire(t, KVType, key)
	if err != nil {
		return 0, err
	}

	err = t.Commit()
	return n, err
}
// Lock acquires the kv batch lock. Pair with Unlock; writes buffered
// between Lock and Commit are applied atomically.
func (db *DB) Lock() {
	t := db.kvBatch
	t.Lock()
}

// Remove buffers deletion of key and its expiration entry into the kv
// batch without committing. Returns false for an empty key or when
// removing the expiration fails.
// NOTE(review): unlike the other mutators, this does not take the batch
// lock itself — callers appear expected to use Lock/Commit/Unlock around
// it; confirm against call sites.
func (db *DB) Remove(key []byte) bool {
	if len(key) == 0 {
		return false
	}
	t := db.kvBatch
	t.Delete(db.encodeKVKey(key))
	_, err := db.rmExpire(t, KVType, key)
	if err != nil {
		return false
	}
	return true
}

// Commit flushes the kv batch's buffered writes to the store.
func (db *DB) Commit() error {
	t := db.kvBatch
	return t.Commit()
}

// Unlock releases the kv batch lock acquired by Lock.
func (db *DB) Unlock() {
	t := db.kvBatch
	t.Unlock()
}
@ -0,0 +1,492 @@ | |||
package nodb | |||
import ( | |||
"encoding/binary" | |||
"errors" | |||
"time" | |||
"github.com/lunny/nodb/store" | |||
) | |||
const (
	// listHeadSeq / listTailSeq select which end of a list an
	// operation works on.
	listHeadSeq int32 = 1
	listTailSeq int32 = 2

	// Element sequence numbers live in (listMinSeq, listMaxSeq); new
	// lists start in the middle so both ends can grow.
	listMinSeq     int32 = 1000
	listMaxSeq     int32 = 1<<31 - 1000
	listInitialSeq int32 = listMinSeq + (listMaxSeq-listMinSeq)/2
)

var errLMetaKey = errors.New("invalid lmeta key")
var errListKey = errors.New("invalid list key")
var errListSeq = errors.New("invalid list sequence, overflow")
// lEncodeMetaKey encodes the list meta key: [index][LMetaType][key].
// The meta value stores the list's head and tail sequence numbers.
func (db *DB) lEncodeMetaKey(key []byte) []byte {
	buf := make([]byte, len(key)+2)
	buf[0] = db.index
	buf[1] = LMetaType

	copy(buf[2:], key)
	return buf
}

// lDecodeMetaKey strips and validates the [index][LMetaType] prefix.
// The result aliases ek.
func (db *DB) lDecodeMetaKey(ek []byte) ([]byte, error) {
	if len(ek) < 2 || ek[0] != db.index || ek[1] != LMetaType {
		return nil, errLMetaKey
	}

	return ek[2:], nil
}

// lEncodeListKey encodes one list element:
// [index][ListType][2-byte big-endian key length][key][4-byte big-endian seq].
func (db *DB) lEncodeListKey(key []byte, seq int32) []byte {
	buf := make([]byte, len(key)+8)

	pos := 0
	buf[pos] = db.index
	pos++
	buf[pos] = ListType
	pos++

	binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
	pos += 2

	copy(buf[pos:], key)
	pos += len(key)

	binary.BigEndian.PutUint32(buf[pos:], uint32(seq))

	return buf
}

// lDecodeListKey splits an encoded list element key back into (key, seq),
// validating index, type byte, and that the total length matches the
// embedded key length exactly. key aliases ek.
func (db *DB) lDecodeListKey(ek []byte) (key []byte, seq int32, err error) {
	if len(ek) < 8 || ek[0] != db.index || ek[1] != ListType {
		err = errListKey
		return
	}

	keyLen := int(binary.BigEndian.Uint16(ek[2:]))
	if keyLen+8 != len(ek) {
		err = errListKey
		return
	}

	key = ek[4 : 4+keyLen]
	seq = int32(binary.BigEndian.Uint32(ek[4+keyLen:]))
	return
}
// lpush appends args to the head or tail of the list at key (selected by
// whereSeq) and returns the resulting list length. Head pushes allocate
// decreasing sequence numbers, tail pushes increasing ones; errListSeq
// is returned if the sequence range would be exhausted.
func (db *DB) lpush(key []byte, whereSeq int32, args ...[]byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	var headSeq int32
	var tailSeq int32
	var size int32
	var err error

	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	metaKey := db.lEncodeMetaKey(key)
	headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey)
	if err != nil {
		return 0, err
	}

	var pushCnt int = len(args)
	if pushCnt == 0 {
		return int64(size), nil
	}

	var seq int32 = headSeq
	var delta int32 = -1
	if whereSeq == listTailSeq {
		seq = tailSeq
		delta = 1
	}

	// append elements
	// A non-empty list's head/tail slots are occupied, so start one
	// step beyond; an empty list reuses the initial sequence directly.
	if size > 0 {
		seq += delta
	}

	for i := 0; i < pushCnt; i++ {
		ek := db.lEncodeListKey(key, seq+int32(i)*delta)
		t.Put(ek, args[i])
	}

	seq += int32(pushCnt-1) * delta
	if seq <= listMinSeq || seq >= listMaxSeq {
		return 0, errListSeq
	}

	// set meta info
	if whereSeq == listHeadSeq {
		headSeq = seq
	} else {
		tailSeq = seq
	}

	db.lSetMeta(metaKey, headSeq, tailSeq)

	err = t.Commit()

	return int64(size) + int64(pushCnt), err
}
func (db *DB) lpop(key []byte, whereSeq int32) ([]byte, error) { | |||
if err := checkKeySize(key); err != nil { | |||
return nil, err | |||
} | |||
t := db.listBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
var headSeq int32 | |||
var tailSeq int32 | |||
var err error | |||
metaKey := db.lEncodeMetaKey(key) | |||
headSeq, tailSeq, _, err = db.lGetMeta(nil, metaKey) | |||
if err != nil { | |||
return nil, err | |||
} | |||
var value []byte | |||
var seq int32 = headSeq | |||
if whereSeq == listTailSeq { | |||
seq = tailSeq | |||
} | |||
itemKey := db.lEncodeListKey(key, seq) | |||
value, err = db.bucket.Get(itemKey) | |||
if err != nil { | |||
return nil, err | |||
} | |||
if whereSeq == listHeadSeq { | |||
headSeq += 1 | |||
} else { | |||
tailSeq -= 1 | |||
} | |||
t.Delete(itemKey) | |||
size := db.lSetMeta(metaKey, headSeq, tailSeq) | |||
if size == 0 { | |||
db.rmExpire(t, HashType, key) | |||
} | |||
err = t.Commit() | |||
return value, err | |||
} | |||
// ps : here just focus on deleting the list data,
// any other likes expire is ignore.
// lDelete buffers deletion of every element of the list plus its meta
// key into t, returning the number of elements removed. Caller commits.
func (db *DB) lDelete(t *batch, key []byte) int64 {
	mk := db.lEncodeMetaKey(key)

	var headSeq int32
	var tailSeq int32
	var err error

	it := db.bucket.NewIterator()
	defer it.Close()

	headSeq, tailSeq, _, err = db.lGetMeta(it, mk)
	if err != nil {
		return 0
	}

	var num int64 = 0
	startKey := db.lEncodeListKey(key, headSeq)
	stopKey := db.lEncodeListKey(key, tailSeq)

	rit := store.NewRangeIterator(it, &store.Range{startKey, stopKey, store.RangeClose})
	for ; rit.Valid(); rit.Next() {
		t.Delete(rit.RawKey())
		num++
	}

	t.Delete(mk)

	return num
}

// lGetMeta reads the list meta value for ek, using it when non-nil or a
// direct bucket Get otherwise. A missing meta yields the initial head and
// tail sequences with size 0; otherwise the value holds two
// little-endian uint32s: head then tail, and size = tail - head + 1.
func (db *DB) lGetMeta(it *store.Iterator, ek []byte) (headSeq int32, tailSeq int32, size int32, err error) {
	var v []byte
	if it != nil {
		v = it.Find(ek)
	} else {
		v, err = db.bucket.Get(ek)
	}
	if err != nil {
		return
	} else if v == nil {
		headSeq = listInitialSeq
		tailSeq = listInitialSeq
		size = 0
		return
	} else {
		headSeq = int32(binary.LittleEndian.Uint32(v[0:4]))
		tailSeq = int32(binary.LittleEndian.Uint32(v[4:8]))
		size = tailSeq - headSeq + 1
	}
	return
}

// lSetMeta buffers the list meta value (head and tail, little-endian)
// into the list batch, deleting the meta key when the list is empty.
// Returns the resulting size. Caller must hold the batch lock and Commit.
func (db *DB) lSetMeta(ek []byte, headSeq int32, tailSeq int32) int32 {
	t := db.listBatch

	var size int32 = tailSeq - headSeq + 1
	if size < 0 {
		// todo : log error + panic
	} else if size == 0 {
		t.Delete(ek)
	} else {
		buf := make([]byte, 8)

		binary.LittleEndian.PutUint32(buf[0:4], uint32(headSeq))
		binary.LittleEndian.PutUint32(buf[4:8], uint32(tailSeq))

		t.Put(ek, buf)
	}

	return size
}
// lExpireAt registers an absolute expiration for the list at key.
// Returns (0, nil) when the list is empty or missing, (1, nil) on success.
func (db *DB) lExpireAt(key []byte, when int64) (int64, error) {
	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	if llen, err := db.LLen(key); err != nil || llen == 0 {
		return 0, err
	} else {
		db.expireAt(t, ListType, key, when)
		if err := t.Commit(); err != nil {
			return 0, err
		}
	}
	return 1, nil
}

// LIndex returns the element at index in the list at key. Negative
// indexes count from the tail (-1 is the last element). Missing
// positions yield nil.
func (db *DB) LIndex(key []byte, index int32) ([]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	var seq int32
	var headSeq int32
	var tailSeq int32
	var err error

	metaKey := db.lEncodeMetaKey(key)

	it := db.bucket.NewIterator()
	defer it.Close()

	headSeq, tailSeq, _, err = db.lGetMeta(it, metaKey)
	if err != nil {
		return nil, err
	}

	if index >= 0 {
		seq = headSeq + index
	} else {
		seq = tailSeq + index + 1
	}

	sk := db.lEncodeListKey(key, seq)
	v := it.Find(sk)

	return v, nil
}

// LLen returns the length of the list at key, derived from its meta key.
func (db *DB) LLen(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	ek := db.lEncodeMetaKey(key)
	_, _, size, err := db.lGetMeta(nil, ek)
	return int64(size), err
}

// LPop removes and returns the first element of the list at key.
func (db *DB) LPop(key []byte) ([]byte, error) {
	return db.lpop(key, listHeadSeq)
}

// LPush prepends one or more values to the list at key and returns the
// new length.
func (db *DB) LPush(key []byte, arg1 []byte, args ...[]byte) (int64, error) {
	var argss = [][]byte{arg1}
	argss = append(argss, args...)
	return db.lpush(key, listHeadSeq, argss...)
}
// LRange returns the elements of the list at key between start and stop
// (inclusive). Negative indexes count from the tail; out-of-range
// requests are clamped and an inverted range yields an empty slice.
func (db *DB) LRange(key []byte, start int32, stop int32) ([][]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	var headSeq int32
	var llen int32
	var err error

	metaKey := db.lEncodeMetaKey(key)

	it := db.bucket.NewIterator()
	defer it.Close()

	if headSeq, _, llen, err = db.lGetMeta(it, metaKey); err != nil {
		return nil, err
	}

	// Normalize negative indexes against the list length.
	if start < 0 {
		start = llen + start
	}
	if stop < 0 {
		stop = llen + stop
	}
	if start < 0 {
		start = 0
	}

	if start > stop || start >= llen {
		return [][]byte{}, nil
	}

	if stop >= llen {
		stop = llen - 1
	}

	limit := (stop - start) + 1
	headSeq += start

	v := make([][]byte, 0, limit)

	startKey := db.lEncodeListKey(key, headSeq)
	rit := store.NewRangeLimitIterator(it,
		&store.Range{
			Min:  startKey,
			Max:  nil,
			Type: store.RangeClose},
		&store.Limit{
			Offset: 0,
			Count:  int(limit)})

	for ; rit.Valid(); rit.Next() {
		v = append(v, rit.Value())
	}

	return v, nil
}

// RPop removes and returns the last element of the list at key.
func (db *DB) RPop(key []byte) ([]byte, error) {
	return db.lpop(key, listTailSeq)
}

// RPush appends one or more values to the list at key and returns the
// new length.
func (db *DB) RPush(key []byte, arg1 []byte, args ...[]byte) (int64, error) {
	var argss = [][]byte{arg1}
	argss = append(argss, args...)
	return db.lpush(key, listTailSeq, argss...)
}
// LClear deletes the entire list at key along with its expiration entry,
// returning the number of elements removed.
func (db *DB) LClear(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	num := db.lDelete(t, key)
	db.rmExpire(t, ListType, key)

	err := t.Commit()
	return num, err
}

// LMclear deletes several lists in one batch. The returned count is the
// number of keys requested, not the number that actually existed.
func (db *DB) LMclear(keys ...[]byte) (int64, error) {
	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	for _, key := range keys {
		if err := checkKeySize(key); err != nil {
			return 0, err
		}

		db.lDelete(t, key)
		db.rmExpire(t, ListType, key)
	}

	err := t.Commit()
	return int64(len(keys)), err
}

// lFlush drops all list data in this DB under the list batch lock.
func (db *DB) lFlush() (drop int64, err error) {
	t := db.listBatch
	t.Lock()
	defer t.Unlock()
	return db.flushType(t, ListType)
}
// LExpire sets the list at key to expire after duration seconds.
func (db *DB) LExpire(key []byte, duration int64) (int64, error) {
	if duration <= 0 {
		return 0, errExpireValue
	}

	return db.lExpireAt(key, time.Now().Unix()+duration)
}

// LExpireAt sets the list at key to expire at the absolute unix
// timestamp when. Returns errExpireValue if when is not in the future.
func (db *DB) LExpireAt(key []byte, when int64) (int64, error) {
	if when <= time.Now().Unix() {
		return 0, errExpireValue
	}

	return db.lExpireAt(key, when)
}

// LTTL returns the remaining time to live of the list at key.
func (db *DB) LTTL(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return -1, err
	}

	return db.ttl(ListType, key)
}

// LPersist removes any expiration set on the list at key.
func (db *DB) LPersist(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	n, err := db.rmExpire(t, ListType, key)
	if err != nil {
		return 0, err
	}

	err = t.Commit()
	return n, err
}

// LScan iterates list meta keys starting from key, returning at most
// count keys; if inclusive is true the range starts at key itself.
func (db *DB) LScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
	return db.scan(LMetaType, key, count, inclusive, match)
}

// lEncodeMinKey returns the smallest possible list meta key for this db.
func (db *DB) lEncodeMinKey() []byte {
	return db.lEncodeMetaKey(nil)
}

// lEncodeMaxKey returns an exclusive upper bound over all list meta keys
// for this db, by bumping the type byte.
func (db *DB) lEncodeMaxKey() []byte {
	ek := db.lEncodeMetaKey(nil)
	ek[len(ek)-1] = LMetaType + 1
	return ek
}
@ -0,0 +1,601 @@ | |||
package nodb | |||
import ( | |||
"encoding/binary" | |||
"errors" | |||
"time" | |||
"github.com/lunny/nodb/store" | |||
) | |||
var errSetKey = errors.New("invalid set key")
var errSSizeKey = errors.New("invalid ssize key")

const (
	// setStartSep separates key from member in an encoded set key;
	// setStopSep is the next byte value, used as an exclusive upper
	// bound when range-scanning all members of one set.
	setStartSep byte = ':'
	setStopSep  byte = setStartSep + 1
	UnionType   byte = 51
	DiffType    byte = 52
	InterType   byte = 53
)

// checkSetKMSize validates that a set key and member are non-empty and
// within the configured maximum sizes.
func checkSetKMSize(key []byte, member []byte) error {
	if len(key) > MaxKeySize || len(key) == 0 {
		return errKeySize
	} else if len(member) > MaxSetMemberSize || len(member) == 0 {
		return errSetMemberSize
	}
	return nil
}
// sEncodeSizeKey encodes the meta key holding a set's member count:
// [index][SSizeType][key].
func (db *DB) sEncodeSizeKey(key []byte) []byte {
	buf := make([]byte, len(key)+2)

	buf[0] = db.index
	buf[1] = SSizeType

	copy(buf[2:], key)
	return buf
}

// sDecodeSizeKey strips and validates the [index][SSizeType] prefix.
// The result aliases ek.
func (db *DB) sDecodeSizeKey(ek []byte) ([]byte, error) {
	if len(ek) < 2 || ek[0] != db.index || ek[1] != SSizeType {
		return nil, errSSizeKey
	}

	return ek[2:], nil
}

// sEncodeSetKey encodes one set member entry:
// [index][SetType][2-byte big-endian key length][key][setStartSep][member].
func (db *DB) sEncodeSetKey(key []byte, member []byte) []byte {
	buf := make([]byte, len(key)+len(member)+1+1+2+1)

	pos := 0
	buf[pos] = db.index
	pos++
	buf[pos] = SetType
	pos++

	binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
	pos += 2

	copy(buf[pos:], key)
	pos += len(key)

	buf[pos] = setStartSep
	pos++
	copy(buf[pos:], member)

	return buf
}
func (db *DB) sDecodeSetKey(ek []byte) ([]byte, []byte, error) { | |||
if len(ek) < 5 || ek[0] != db.index || ek[1] != SetType { | |||
return nil, nil, errSetKey | |||
} | |||
pos := 2 | |||
keyLen := int(binary.BigEndian.Uint16(ek[pos:])) | |||
pos += 2 | |||
if keyLen+5 > len(ek) { | |||
return nil, nil, errSetKey | |||
} | |||
key := ek[pos : pos+keyLen] | |||
pos += keyLen | |||
if ek[pos] != hashStartSep { | |||
return nil, nil, errSetKey | |||
} | |||
pos++ | |||
member := ek[pos:] | |||
return key, member, nil | |||
} | |||
// sEncodeStartKey returns the inclusive lower bound for scanning all
// members of key (empty member, setStartSep terminator).
func (db *DB) sEncodeStartKey(key []byte) []byte {
	return db.sEncodeSetKey(key, nil)
}

// sEncodeStopKey returns the exclusive upper bound for scanning all
// members of key, by bumping the separator byte to setStopSep.
func (db *DB) sEncodeStopKey(key []byte) []byte {
	k := db.sEncodeSetKey(key, nil)

	k[len(k)-1] = setStopSep

	return k
}

// sFlush drops all set data in this DB under the set batch lock.
func (db *DB) sFlush() (drop int64, err error) {
	t := db.setBatch
	t.Lock()
	defer t.Unlock()

	return db.flushType(t, SetType)
}

// sDelete buffers deletion of every member of the set plus its size key
// into t, returning the number of members removed. Caller commits.
func (db *DB) sDelete(t *batch, key []byte) int64 {
	sk := db.sEncodeSizeKey(key)
	start := db.sEncodeStartKey(key)
	stop := db.sEncodeStopKey(key)

	var num int64 = 0
	it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
	for ; it.Valid(); it.Next() {
		t.Delete(it.RawKey())
		num++
	}

	it.Close()

	t.Delete(sk)
	return num
}
func (db *DB) sIncrSize(key []byte, delta int64) (int64, error) { | |||
t := db.setBatch | |||
sk := db.sEncodeSizeKey(key) | |||
var err error | |||
var size int64 = 0 | |||
if size, err = Int64(db.bucket.Get(sk)); err != nil { | |||
return 0, err | |||
} else { | |||
size += delta | |||
if size <= 0 { | |||
size = 0 | |||
t.Delete(sk) | |||
db.rmExpire(t, SetType, key) | |||
} else { | |||
t.Put(sk, PutInt64(size)) | |||
} | |||
} | |||
return size, nil | |||
} | |||
func (db *DB) sExpireAt(key []byte, when int64) (int64, error) { | |||
t := db.setBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
if scnt, err := db.SCard(key); err != nil || scnt == 0 { | |||
return 0, err | |||
} else { | |||
db.expireAt(t, SetType, key, when) | |||
if err := t.Commit(); err != nil { | |||
return 0, err | |||
} | |||
} | |||
return 1, nil | |||
} | |||
func (db *DB) sSetItem(key []byte, member []byte) (int64, error) { | |||
t := db.setBatch | |||
ek := db.sEncodeSetKey(key, member) | |||
var n int64 = 1 | |||
if v, _ := db.bucket.Get(ek); v != nil { | |||
n = 0 | |||
} else { | |||
if _, err := db.sIncrSize(key, 1); err != nil { | |||
return 0, err | |||
} | |||
} | |||
t.Put(ek, nil) | |||
return n, nil | |||
} | |||
func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) { | |||
t := db.setBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
var err error | |||
var ek []byte | |||
var num int64 = 0 | |||
for i := 0; i < len(args); i++ { | |||
if err := checkSetKMSize(key, args[i]); err != nil { | |||
return 0, err | |||
} | |||
ek = db.sEncodeSetKey(key, args[i]) | |||
if v, err := db.bucket.Get(ek); err != nil { | |||
return 0, err | |||
} else if v == nil { | |||
num++ | |||
} | |||
t.Put(ek, nil) | |||
} | |||
if _, err = db.sIncrSize(key, num); err != nil { | |||
return 0, err | |||
} | |||
err = t.Commit() | |||
return num, err | |||
} | |||
func (db *DB) SCard(key []byte) (int64, error) { | |||
if err := checkKeySize(key); err != nil { | |||
return 0, err | |||
} | |||
sk := db.sEncodeSizeKey(key) | |||
return Int64(db.bucket.Get(sk)) | |||
} | |||
func (db *DB) sDiffGeneric(keys ...[]byte) ([][]byte, error) { | |||
destMap := make(map[string]bool) | |||
members, err := db.SMembers(keys[0]) | |||
if err != nil { | |||
return nil, err | |||
} | |||
for _, m := range members { | |||
destMap[String(m)] = true | |||
} | |||
for _, k := range keys[1:] { | |||
members, err := db.SMembers(k) | |||
if err != nil { | |||
return nil, err | |||
} | |||
for _, m := range members { | |||
if _, ok := destMap[String(m)]; !ok { | |||
continue | |||
} else if ok { | |||
delete(destMap, String(m)) | |||
} | |||
} | |||
// O - A = O, O is zero set. | |||
if len(destMap) == 0 { | |||
return nil, nil | |||
} | |||
} | |||
slice := make([][]byte, len(destMap)) | |||
idx := 0 | |||
for k, v := range destMap { | |||
if !v { | |||
continue | |||
} | |||
slice[idx] = []byte(k) | |||
idx++ | |||
} | |||
return slice, nil | |||
} | |||
func (db *DB) SDiff(keys ...[]byte) ([][]byte, error) { | |||
v, err := db.sDiffGeneric(keys...) | |||
return v, err | |||
} | |||
func (db *DB) SDiffStore(dstKey []byte, keys ...[]byte) (int64, error) { | |||
n, err := db.sStoreGeneric(dstKey, DiffType, keys...) | |||
return n, err | |||
} | |||
func (db *DB) sInterGeneric(keys ...[]byte) ([][]byte, error) { | |||
destMap := make(map[string]bool) | |||
members, err := db.SMembers(keys[0]) | |||
if err != nil { | |||
return nil, err | |||
} | |||
for _, m := range members { | |||
destMap[String(m)] = true | |||
} | |||
for _, key := range keys[1:] { | |||
if err := checkKeySize(key); err != nil { | |||
return nil, err | |||
} | |||
members, err := db.SMembers(key) | |||
if err != nil { | |||
return nil, err | |||
} else if len(members) == 0 { | |||
return nil, err | |||
} | |||
tempMap := make(map[string]bool) | |||
for _, member := range members { | |||
if err := checkKeySize(member); err != nil { | |||
return nil, err | |||
} | |||
if _, ok := destMap[String(member)]; ok { | |||
tempMap[String(member)] = true //mark this item as selected | |||
} | |||
} | |||
destMap = tempMap //reduce the size of the result set | |||
if len(destMap) == 0 { | |||
return nil, nil | |||
} | |||
} | |||
slice := make([][]byte, len(destMap)) | |||
idx := 0 | |||
for k, v := range destMap { | |||
if !v { | |||
continue | |||
} | |||
slice[idx] = []byte(k) | |||
idx++ | |||
} | |||
return slice, nil | |||
} | |||
func (db *DB) SInter(keys ...[]byte) ([][]byte, error) { | |||
v, err := db.sInterGeneric(keys...) | |||
return v, err | |||
} | |||
func (db *DB) SInterStore(dstKey []byte, keys ...[]byte) (int64, error) { | |||
n, err := db.sStoreGeneric(dstKey, InterType, keys...) | |||
return n, err | |||
} | |||
func (db *DB) SIsMember(key []byte, member []byte) (int64, error) { | |||
ek := db.sEncodeSetKey(key, member) | |||
var n int64 = 1 | |||
if v, err := db.bucket.Get(ek); err != nil { | |||
return 0, err | |||
} else if v == nil { | |||
n = 0 | |||
} | |||
return n, nil | |||
} | |||
func (db *DB) SMembers(key []byte) ([][]byte, error) { | |||
if err := checkKeySize(key); err != nil { | |||
return nil, err | |||
} | |||
start := db.sEncodeStartKey(key) | |||
stop := db.sEncodeStopKey(key) | |||
v := make([][]byte, 0, 16) | |||
it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) | |||
for ; it.Valid(); it.Next() { | |||
_, m, err := db.sDecodeSetKey(it.Key()) | |||
if err != nil { | |||
return nil, err | |||
} | |||
v = append(v, m) | |||
} | |||
it.Close() | |||
return v, nil | |||
} | |||
func (db *DB) SRem(key []byte, args ...[]byte) (int64, error) { | |||
t := db.setBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
var ek []byte | |||
var v []byte | |||
var err error | |||
it := db.bucket.NewIterator() | |||
defer it.Close() | |||
var num int64 = 0 | |||
for i := 0; i < len(args); i++ { | |||
if err := checkSetKMSize(key, args[i]); err != nil { | |||
return 0, err | |||
} | |||
ek = db.sEncodeSetKey(key, args[i]) | |||
v = it.RawFind(ek) | |||
if v == nil { | |||
continue | |||
} else { | |||
num++ | |||
t.Delete(ek) | |||
} | |||
} | |||
if _, err = db.sIncrSize(key, -num); err != nil { | |||
return 0, err | |||
} | |||
err = t.Commit() | |||
return num, err | |||
} | |||
func (db *DB) sUnionGeneric(keys ...[]byte) ([][]byte, error) { | |||
dstMap := make(map[string]bool) | |||
for _, key := range keys { | |||
if err := checkKeySize(key); err != nil { | |||
return nil, err | |||
} | |||
members, err := db.SMembers(key) | |||
if err != nil { | |||
return nil, err | |||
} | |||
for _, member := range members { | |||
dstMap[String(member)] = true | |||
} | |||
} | |||
slice := make([][]byte, len(dstMap)) | |||
idx := 0 | |||
for k, v := range dstMap { | |||
if !v { | |||
continue | |||
} | |||
slice[idx] = []byte(k) | |||
idx++ | |||
} | |||
return slice, nil | |||
} | |||
func (db *DB) SUnion(keys ...[]byte) ([][]byte, error) { | |||
v, err := db.sUnionGeneric(keys...) | |||
return v, err | |||
} | |||
func (db *DB) SUnionStore(dstKey []byte, keys ...[]byte) (int64, error) { | |||
n, err := db.sStoreGeneric(dstKey, UnionType, keys...) | |||
return n, err | |||
} | |||
// sStoreGeneric computes a set operation (union/diff/inter, selected by
// optType) over keys and stores the result as the set dstKey, replacing
// any previous contents. Returns the cardinality of the stored result.
// The destination clear, member writes, and size write all go into one
// committed batch.
func (db *DB) sStoreGeneric(dstKey []byte, optType byte, keys ...[]byte) (int64, error) {
	if err := checkKeySize(dstKey); err != nil {
		return 0, err
	}
	t := db.setBatch
	t.Lock()
	defer t.Unlock()
	// Clear any existing destination set before writing the new members.
	db.sDelete(t, dstKey)
	var err error
	var ek []byte
	var v [][]byte
	// NOTE(review): an unknown optType falls through with v == nil and
	// silently stores an empty set — confirm callers only pass the three
	// known types.
	switch optType {
	case UnionType:
		v, err = db.sUnionGeneric(keys...)
	case DiffType:
		v, err = db.sDiffGeneric(keys...)
	case InterType:
		v, err = db.sInterGeneric(keys...)
	}
	if err != nil {
		return 0, err
	}
	for _, m := range v {
		if err := checkSetKMSize(dstKey, m); err != nil {
			return 0, err
		}
		ek = db.sEncodeSetKey(dstKey, m)
		// Get only to surface storage errors; the returned value is unused.
		if _, err := db.bucket.Get(ek); err != nil {
			return 0, err
		}
		t.Put(ek, nil)
	}
	var num = int64(len(v))
	sk := db.sEncodeSizeKey(dstKey)
	t.Put(sk, PutInt64(num))
	if err = t.Commit(); err != nil {
		return 0, err
	}
	return num, nil
}
func (db *DB) SClear(key []byte) (int64, error) { | |||
if err := checkKeySize(key); err != nil { | |||
return 0, err | |||
} | |||
t := db.setBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
num := db.sDelete(t, key) | |||
db.rmExpire(t, SetType, key) | |||
err := t.Commit() | |||
return num, err | |||
} | |||
func (db *DB) SMclear(keys ...[]byte) (int64, error) { | |||
t := db.setBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
for _, key := range keys { | |||
if err := checkKeySize(key); err != nil { | |||
return 0, err | |||
} | |||
db.sDelete(t, key) | |||
db.rmExpire(t, SetType, key) | |||
} | |||
err := t.Commit() | |||
return int64(len(keys)), err | |||
} | |||
func (db *DB) SExpire(key []byte, duration int64) (int64, error) { | |||
if duration <= 0 { | |||
return 0, errExpireValue | |||
} | |||
return db.sExpireAt(key, time.Now().Unix()+duration) | |||
} | |||
func (db *DB) SExpireAt(key []byte, when int64) (int64, error) { | |||
if when <= time.Now().Unix() { | |||
return 0, errExpireValue | |||
} | |||
return db.sExpireAt(key, when) | |||
} | |||
func (db *DB) STTL(key []byte) (int64, error) { | |||
if err := checkKeySize(key); err != nil { | |||
return -1, err | |||
} | |||
return db.ttl(SetType, key) | |||
} | |||
func (db *DB) SPersist(key []byte) (int64, error) { | |||
if err := checkKeySize(key); err != nil { | |||
return 0, err | |||
} | |||
t := db.setBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
n, err := db.rmExpire(t, SetType, key) | |||
if err != nil { | |||
return 0, err | |||
} | |||
err = t.Commit() | |||
return n, err | |||
} | |||
func (db *DB) SScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) { | |||
return db.scan(SSizeType, key, count, inclusive, match) | |||
} |
@ -0,0 +1,195 @@ | |||
package nodb | |||
import ( | |||
"encoding/binary" | |||
"errors" | |||
"time" | |||
"github.com/lunny/nodb/store" | |||
) | |||
var (
	errExpMetaKey = errors.New("invalid expire meta key")
	errExpTimeKey = errors.New("invalid expire time key")
)
// retireCallback purges an expired key's data inside the given batch and
// returns how many entries it removed.
type retireCallback func(*batch, []byte) int64
// elimination drives background expiration: for each data type it holds
// the batch to write into and the callback used to purge expired keys
// (both registered via regRetireContext).
type elimination struct {
	db *DB
	exp2Tx []*batch
	exp2Retire []retireCallback
}
var errExpType = errors.New("invalid expire type")
func (db *DB) expEncodeTimeKey(dataType byte, key []byte, when int64) []byte { | |||
buf := make([]byte, len(key)+11) | |||
buf[0] = db.index | |||
buf[1] = ExpTimeType | |||
buf[2] = dataType | |||
pos := 3 | |||
binary.BigEndian.PutUint64(buf[pos:], uint64(when)) | |||
pos += 8 | |||
copy(buf[pos:], key) | |||
return buf | |||
} | |||
func (db *DB) expEncodeMetaKey(dataType byte, key []byte) []byte { | |||
buf := make([]byte, len(key)+3) | |||
buf[0] = db.index | |||
buf[1] = ExpMetaType | |||
buf[2] = dataType | |||
pos := 3 | |||
copy(buf[pos:], key) | |||
return buf | |||
} | |||
func (db *DB) expDecodeMetaKey(mk []byte) (byte, []byte, error) { | |||
if len(mk) <= 3 || mk[0] != db.index || mk[1] != ExpMetaType { | |||
return 0, nil, errExpMetaKey | |||
} | |||
return mk[2], mk[3:], nil | |||
} | |||
func (db *DB) expDecodeTimeKey(tk []byte) (byte, []byte, int64, error) { | |||
if len(tk) < 11 || tk[0] != db.index || tk[1] != ExpTimeType { | |||
return 0, nil, 0, errExpTimeKey | |||
} | |||
return tk[2], tk[11:], int64(binary.BigEndian.Uint64(tk[3:])), nil | |||
} | |||
func (db *DB) expire(t *batch, dataType byte, key []byte, duration int64) { | |||
db.expireAt(t, dataType, key, time.Now().Unix()+duration) | |||
} | |||
func (db *DB) expireAt(t *batch, dataType byte, key []byte, when int64) { | |||
mk := db.expEncodeMetaKey(dataType, key) | |||
tk := db.expEncodeTimeKey(dataType, key, when) | |||
t.Put(tk, mk) | |||
t.Put(mk, PutInt64(when)) | |||
} | |||
// ttl returns the remaining time-to-live in seconds for key of dataType,
// or -1 when no expiration is set, the stored value is 0, or the deadline
// already elapsed. Note a lookup error returns err alongside t == -1.
func (db *DB) ttl(dataType byte, key []byte) (t int64, err error) {
	mk := db.expEncodeMetaKey(dataType, key)
	if t, err = Int64(db.bucket.Get(mk)); err != nil || t == 0 {
		t = -1
	} else {
		// Convert the absolute unix deadline into a remaining duration.
		t -= time.Now().Unix()
		if t <= 0 {
			t = -1
		}
		// if t == -1 : to remove ????
	}
	return t, err
}
func (db *DB) rmExpire(t *batch, dataType byte, key []byte) (int64, error) { | |||
mk := db.expEncodeMetaKey(dataType, key) | |||
if v, err := db.bucket.Get(mk); err != nil { | |||
return 0, err | |||
} else if v == nil { | |||
return 0, nil | |||
} else if when, err2 := Int64(v, nil); err2 != nil { | |||
return 0, err2 | |||
} else { | |||
tk := db.expEncodeTimeKey(dataType, key, when) | |||
t.Delete(mk) | |||
t.Delete(tk) | |||
return 1, nil | |||
} | |||
} | |||
func (db *DB) expFlush(t *batch, dataType byte) (err error) { | |||
minKey := make([]byte, 3) | |||
minKey[0] = db.index | |||
minKey[1] = ExpTimeType | |||
minKey[2] = dataType | |||
maxKey := make([]byte, 3) | |||
maxKey[0] = db.index | |||
maxKey[1] = ExpMetaType | |||
maxKey[2] = dataType + 1 | |||
_, err = db.flushRegion(t, minKey, maxKey) | |||
err = t.Commit() | |||
return | |||
} | |||
////////////////////////////////////////////////////////// | |||
// | |||
////////////////////////////////////////////////////////// | |||
func newEliminator(db *DB) *elimination { | |||
eli := new(elimination) | |||
eli.db = db | |||
eli.exp2Tx = make([]*batch, maxDataType) | |||
eli.exp2Retire = make([]retireCallback, maxDataType) | |||
return eli | |||
} | |||
func (eli *elimination) regRetireContext(dataType byte, t *batch, onRetire retireCallback) { | |||
// todo .. need to ensure exist - mapExpMetaType[expType] | |||
eli.exp2Tx[dataType] = t | |||
eli.exp2Retire[dataType] = onRetire | |||
} | |||
// call by outside ... (from *db to another *db) | |||
func (eli *elimination) active() { | |||
now := time.Now().Unix() | |||
db := eli.db | |||
dbGet := db.bucket.Get | |||
minKey := db.expEncodeTimeKey(NoneType, nil, 0) | |||
maxKey := db.expEncodeTimeKey(maxDataType, nil, now) | |||
it := db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeROpen, 0, -1) | |||
for ; it.Valid(); it.Next() { | |||
tk := it.RawKey() | |||
mk := it.RawValue() | |||
dt, k, _, err := db.expDecodeTimeKey(tk) | |||
if err != nil { | |||
continue | |||
} | |||
t := eli.exp2Tx[dt] | |||
onRetire := eli.exp2Retire[dt] | |||
if tk == nil || onRetire == nil { | |||
continue | |||
} | |||
t.Lock() | |||
if exp, err := Int64(dbGet(mk)); err == nil { | |||
// check expire again | |||
if exp <= now { | |||
onRetire(t, k) | |||
t.Delete(tk) | |||
t.Delete(mk) | |||
t.Commit() | |||
} | |||
} | |||
t.Unlock() | |||
} | |||
it.Close() | |||
return | |||
} |
@ -0,0 +1,943 @@ | |||
package nodb | |||
import ( | |||
"bytes" | |||
"encoding/binary" | |||
"errors" | |||
"time" | |||
"github.com/lunny/nodb/store" | |||
) | |||
const (
	// Valid scores lie strictly inside (MinScore, MaxScore); InvalidScore
	// (math.MinInt64) marks "no score available".
	MinScore int64 = -1<<63 + 1
	MaxScore int64 = 1<<63 - 1
	InvalidScore int64 = -1 << 63
	// Aggregation modes for store operations.
	AggregateSum byte = 0
	AggregateMin byte = 1
	AggregateMax byte = 2
)
// ScorePair couples a zset member with its score.
type ScorePair struct {
	Score int64
	Member []byte
}
var errZSizeKey = errors.New("invalid zsize key")
var errZSetKey = errors.New("invalid zset key")
var errZScoreKey = errors.New("invalid zscore key")
var errScoreOverflow = errors.New("zset score overflow")
var errInvalidAggregate = errors.New("invalid aggregate")
var errInvalidWeightNum = errors.New("invalid weight number")
var errInvalidSrcKeyNum = errors.New("invalid src key number")
const (
	// Separator bytes: negative scores ('<') sort before positive ('='),
	// and each range has an exclusive stop byte one above its start.
	zsetNScoreSep byte = '<'
	zsetPScoreSep byte = zsetNScoreSep + 1
	zsetStopScoreSep byte = zsetPScoreSep + 1
	zsetStartMemSep byte = ':'
	zsetStopMemSep byte = zsetStartMemSep + 1
)
func checkZSetKMSize(key []byte, member []byte) error { | |||
if len(key) > MaxKeySize || len(key) == 0 { | |||
return errKeySize | |||
} else if len(member) > MaxZSetMemberSize || len(member) == 0 { | |||
return errZSetMemberSize | |||
} | |||
return nil | |||
} | |||
func (db *DB) zEncodeSizeKey(key []byte) []byte { | |||
buf := make([]byte, len(key)+2) | |||
buf[0] = db.index | |||
buf[1] = ZSizeType | |||
copy(buf[2:], key) | |||
return buf | |||
} | |||
func (db *DB) zDecodeSizeKey(ek []byte) ([]byte, error) { | |||
if len(ek) < 2 || ek[0] != db.index || ek[1] != ZSizeType { | |||
return nil, errZSizeKey | |||
} | |||
return ek[2:], nil | |||
} | |||
func (db *DB) zEncodeSetKey(key []byte, member []byte) []byte { | |||
buf := make([]byte, len(key)+len(member)+5) | |||
pos := 0 | |||
buf[pos] = db.index | |||
pos++ | |||
buf[pos] = ZSetType | |||
pos++ | |||
binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) | |||
pos += 2 | |||
copy(buf[pos:], key) | |||
pos += len(key) | |||
buf[pos] = zsetStartMemSep | |||
pos++ | |||
copy(buf[pos:], member) | |||
return buf | |||
} | |||
func (db *DB) zDecodeSetKey(ek []byte) ([]byte, []byte, error) { | |||
if len(ek) < 5 || ek[0] != db.index || ek[1] != ZSetType { | |||
return nil, nil, errZSetKey | |||
} | |||
keyLen := int(binary.BigEndian.Uint16(ek[2:])) | |||
if keyLen+5 > len(ek) { | |||
return nil, nil, errZSetKey | |||
} | |||
key := ek[4 : 4+keyLen] | |||
if ek[4+keyLen] != zsetStartMemSep { | |||
return nil, nil, errZSetKey | |||
} | |||
member := ek[5+keyLen:] | |||
return key, member, nil | |||
} | |||
func (db *DB) zEncodeStartSetKey(key []byte) []byte { | |||
k := db.zEncodeSetKey(key, nil) | |||
return k | |||
} | |||
func (db *DB) zEncodeStopSetKey(key []byte) []byte { | |||
k := db.zEncodeSetKey(key, nil) | |||
k[len(k)-1] = zsetStartMemSep + 1 | |||
return k | |||
} | |||
func (db *DB) zEncodeScoreKey(key []byte, member []byte, score int64) []byte { | |||
buf := make([]byte, len(key)+len(member)+14) | |||
pos := 0 | |||
buf[pos] = db.index | |||
pos++ | |||
buf[pos] = ZScoreType | |||
pos++ | |||
binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) | |||
pos += 2 | |||
copy(buf[pos:], key) | |||
pos += len(key) | |||
if score < 0 { | |||
buf[pos] = zsetNScoreSep | |||
} else { | |||
buf[pos] = zsetPScoreSep | |||
} | |||
pos++ | |||
binary.BigEndian.PutUint64(buf[pos:], uint64(score)) | |||
pos += 8 | |||
buf[pos] = zsetStartMemSep | |||
pos++ | |||
copy(buf[pos:], member) | |||
return buf | |||
} | |||
func (db *DB) zEncodeStartScoreKey(key []byte, score int64) []byte { | |||
return db.zEncodeScoreKey(key, nil, score) | |||
} | |||
func (db *DB) zEncodeStopScoreKey(key []byte, score int64) []byte { | |||
k := db.zEncodeScoreKey(key, nil, score) | |||
k[len(k)-1] = zsetStopMemSep | |||
return k | |||
} | |||
func (db *DB) zDecodeScoreKey(ek []byte) (key []byte, member []byte, score int64, err error) { | |||
if len(ek) < 14 || ek[0] != db.index || ek[1] != ZScoreType { | |||
err = errZScoreKey | |||
return | |||
} | |||
keyLen := int(binary.BigEndian.Uint16(ek[2:])) | |||
if keyLen+14 > len(ek) { | |||
err = errZScoreKey | |||
return | |||
} | |||
key = ek[4 : 4+keyLen] | |||
pos := 4 + keyLen | |||
if (ek[pos] != zsetNScoreSep) && (ek[pos] != zsetPScoreSep) { | |||
err = errZScoreKey | |||
return | |||
} | |||
pos++ | |||
score = int64(binary.BigEndian.Uint64(ek[pos:])) | |||
pos += 8 | |||
if ek[pos] != zsetStartMemSep { | |||
err = errZScoreKey | |||
return | |||
} | |||
pos++ | |||
member = ek[pos:] | |||
return | |||
} | |||
func (db *DB) zSetItem(t *batch, key []byte, score int64, member []byte) (int64, error) { | |||
if score <= MinScore || score >= MaxScore { | |||
return 0, errScoreOverflow | |||
} | |||
var exists int64 = 0 | |||
ek := db.zEncodeSetKey(key, member) | |||
if v, err := db.bucket.Get(ek); err != nil { | |||
return 0, err | |||
} else if v != nil { | |||
exists = 1 | |||
if s, err := Int64(v, err); err != nil { | |||
return 0, err | |||
} else { | |||
sk := db.zEncodeScoreKey(key, member, s) | |||
t.Delete(sk) | |||
} | |||
} | |||
t.Put(ek, PutInt64(score)) | |||
sk := db.zEncodeScoreKey(key, member, score) | |||
t.Put(sk, []byte{}) | |||
return exists, nil | |||
} | |||
func (db *DB) zDelItem(t *batch, key []byte, member []byte, skipDelScore bool) (int64, error) { | |||
ek := db.zEncodeSetKey(key, member) | |||
if v, err := db.bucket.Get(ek); err != nil { | |||
return 0, err | |||
} else if v == nil { | |||
//not exists | |||
return 0, nil | |||
} else { | |||
//exists | |||
if !skipDelScore { | |||
//we must del score | |||
if s, err := Int64(v, err); err != nil { | |||
return 0, err | |||
} else { | |||
sk := db.zEncodeScoreKey(key, member, s) | |||
t.Delete(sk) | |||
} | |||
} | |||
} | |||
t.Delete(ek) | |||
return 1, nil | |||
} | |||
func (db *DB) zDelete(t *batch, key []byte) int64 { | |||
delMembCnt, _ := db.zRemRange(t, key, MinScore, MaxScore, 0, -1) | |||
// todo : log err | |||
return delMembCnt | |||
} | |||
func (db *DB) zExpireAt(key []byte, when int64) (int64, error) { | |||
t := db.zsetBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
if zcnt, err := db.ZCard(key); err != nil || zcnt == 0 { | |||
return 0, err | |||
} else { | |||
db.expireAt(t, ZSetType, key, when) | |||
if err := t.Commit(); err != nil { | |||
return 0, err | |||
} | |||
} | |||
return 1, nil | |||
} | |||
func (db *DB) ZAdd(key []byte, args ...ScorePair) (int64, error) { | |||
if len(args) == 0 { | |||
return 0, nil | |||
} | |||
t := db.zsetBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
var num int64 = 0 | |||
for i := 0; i < len(args); i++ { | |||
score := args[i].Score | |||
member := args[i].Member | |||
if err := checkZSetKMSize(key, member); err != nil { | |||
return 0, err | |||
} | |||
if n, err := db.zSetItem(t, key, score, member); err != nil { | |||
return 0, err | |||
} else if n == 0 { | |||
//add new | |||
num++ | |||
} | |||
} | |||
if _, err := db.zIncrSize(t, key, num); err != nil { | |||
return 0, err | |||
} | |||
//todo add binlog | |||
err := t.Commit() | |||
return num, err | |||
} | |||
func (db *DB) zIncrSize(t *batch, key []byte, delta int64) (int64, error) { | |||
sk := db.zEncodeSizeKey(key) | |||
size, err := Int64(db.bucket.Get(sk)) | |||
if err != nil { | |||
return 0, err | |||
} else { | |||
size += delta | |||
if size <= 0 { | |||
size = 0 | |||
t.Delete(sk) | |||
db.rmExpire(t, ZSetType, key) | |||
} else { | |||
t.Put(sk, PutInt64(size)) | |||
} | |||
} | |||
return size, nil | |||
} | |||
func (db *DB) ZCard(key []byte) (int64, error) { | |||
if err := checkKeySize(key); err != nil { | |||
return 0, err | |||
} | |||
sk := db.zEncodeSizeKey(key) | |||
return Int64(db.bucket.Get(sk)) | |||
} | |||
func (db *DB) ZScore(key []byte, member []byte) (int64, error) { | |||
if err := checkZSetKMSize(key, member); err != nil { | |||
return InvalidScore, err | |||
} | |||
var score int64 = InvalidScore | |||
k := db.zEncodeSetKey(key, member) | |||
if v, err := db.bucket.Get(k); err != nil { | |||
return InvalidScore, err | |||
} else if v == nil { | |||
return InvalidScore, ErrScoreMiss | |||
} else { | |||
if score, err = Int64(v, nil); err != nil { | |||
return InvalidScore, err | |||
} | |||
} | |||
return score, nil | |||
} | |||
func (db *DB) ZRem(key []byte, members ...[]byte) (int64, error) { | |||
if len(members) == 0 { | |||
return 0, nil | |||
} | |||
t := db.zsetBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
var num int64 = 0 | |||
for i := 0; i < len(members); i++ { | |||
if err := checkZSetKMSize(key, members[i]); err != nil { | |||
return 0, err | |||
} | |||
if n, err := db.zDelItem(t, key, members[i], false); err != nil { | |||
return 0, err | |||
} else if n == 1 { | |||
num++ | |||
} | |||
} | |||
if _, err := db.zIncrSize(t, key, -num); err != nil { | |||
return 0, err | |||
} | |||
err := t.Commit() | |||
return num, err | |||
} | |||
func (db *DB) ZIncrBy(key []byte, delta int64, member []byte) (int64, error) { | |||
if err := checkZSetKMSize(key, member); err != nil { | |||
return InvalidScore, err | |||
} | |||
t := db.zsetBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
ek := db.zEncodeSetKey(key, member) | |||
var oldScore int64 = 0 | |||
v, err := db.bucket.Get(ek) | |||
if err != nil { | |||
return InvalidScore, err | |||
} else if v == nil { | |||
db.zIncrSize(t, key, 1) | |||
} else { | |||
if oldScore, err = Int64(v, err); err != nil { | |||
return InvalidScore, err | |||
} | |||
} | |||
newScore := oldScore + delta | |||
if newScore >= MaxScore || newScore <= MinScore { | |||
return InvalidScore, errScoreOverflow | |||
} | |||
sk := db.zEncodeScoreKey(key, member, newScore) | |||
t.Put(sk, []byte{}) | |||
t.Put(ek, PutInt64(newScore)) | |||
if v != nil { | |||
// so as to update score, we must delete the old one | |||
oldSk := db.zEncodeScoreKey(key, member, oldScore) | |||
t.Delete(oldSk) | |||
} | |||
err = t.Commit() | |||
return newScore, err | |||
} | |||
func (db *DB) ZCount(key []byte, min int64, max int64) (int64, error) { | |||
if err := checkKeySize(key); err != nil { | |||
return 0, err | |||
} | |||
minKey := db.zEncodeStartScoreKey(key, min) | |||
maxKey := db.zEncodeStopScoreKey(key, max) | |||
rangeType := store.RangeROpen | |||
it := db.bucket.RangeLimitIterator(minKey, maxKey, rangeType, 0, -1) | |||
var n int64 = 0 | |||
for ; it.Valid(); it.Next() { | |||
n++ | |||
} | |||
it.Close() | |||
return n, nil | |||
} | |||
// zrank computes the 0-based rank of member in zset key, counting from
// the lowest score (or from the highest when reverse). Returns -1 when
// the member is absent or the rank cannot be established.
func (db *DB) zrank(key []byte, member []byte, reverse bool) (int64, error) {
	if err := checkZSetKMSize(key, member); err != nil {
		return 0, err
	}
	k := db.zEncodeSetKey(key, member)
	it := db.bucket.NewIterator()
	defer it.Close()
	if v := it.Find(k); v == nil {
		return -1, nil
	} else {
		if s, err := Int64(v, nil); err != nil {
			return 0, err
		} else {
			var rit *store.RangeLimitIterator
			sk := db.zEncodeScoreKey(key, member, s)
			// Walk the score index from one end up to (and including) the
			// member's own score key, counting entries along the way.
			if !reverse {
				minKey := db.zEncodeStartScoreKey(key, MinScore)
				rit = store.NewRangeIterator(it, &store.Range{minKey, sk, store.RangeClose})
			} else {
				maxKey := db.zEncodeStopScoreKey(key, MaxScore)
				rit = store.NewRevRangeIterator(it, &store.Range{sk, maxKey, store.RangeClose})
			}
			var lastKey []byte = nil
			var n int64 = 0
			for ; rit.Valid(); rit.Next() {
				n++
				// BufKey copies the current key, reusing lastKey's storage
				// where possible.
				lastKey = rit.BufKey(lastKey)
			}
			// The rank is only valid if the scan actually ended on the
			// member itself; otherwise fall through to -1.
			if _, m, _, err := db.zDecodeScoreKey(lastKey); err == nil && bytes.Equal(m, member) {
				n--
				return n, nil
			}
		}
	}
	return -1, nil
}
func (db *DB) zIterator(key []byte, min int64, max int64, offset int, count int, reverse bool) *store.RangeLimitIterator { | |||
minKey := db.zEncodeStartScoreKey(key, min) | |||
maxKey := db.zEncodeStopScoreKey(key, max) | |||
if !reverse { | |||
return db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count) | |||
} else { | |||
return db.bucket.RevRangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count) | |||
} | |||
} | |||
func (db *DB) zRemRange(t *batch, key []byte, min int64, max int64, offset int, count int) (int64, error) { | |||
if len(key) > MaxKeySize { | |||
return 0, errKeySize | |||
} | |||
it := db.zIterator(key, min, max, offset, count, false) | |||
var num int64 = 0 | |||
for ; it.Valid(); it.Next() { | |||
sk := it.RawKey() | |||
_, m, _, err := db.zDecodeScoreKey(sk) | |||
if err != nil { | |||
continue | |||
} | |||
if n, err := db.zDelItem(t, key, m, true); err != nil { | |||
return 0, err | |||
} else if n == 1 { | |||
num++ | |||
} | |||
t.Delete(sk) | |||
} | |||
it.Close() | |||
if _, err := db.zIncrSize(t, key, -num); err != nil { | |||
return 0, err | |||
} | |||
return num, nil | |||
} | |||
func (db *DB) zRange(key []byte, min int64, max int64, offset int, count int, reverse bool) ([]ScorePair, error) { | |||
if len(key) > MaxKeySize { | |||
return nil, errKeySize | |||
} | |||
if offset < 0 { | |||
return []ScorePair{}, nil | |||
} | |||
nv := 64 | |||
if count > 0 { | |||
nv = count | |||
} | |||
v := make([]ScorePair, 0, nv) | |||
var it *store.RangeLimitIterator | |||
//if reverse and offset is 0, count < 0, we may use forward iterator then reverse | |||
//because store iterator prev is slower than next | |||
if !reverse || (offset == 0 && count < 0) { | |||
it = db.zIterator(key, min, max, offset, count, false) | |||
} else { | |||
it = db.zIterator(key, min, max, offset, count, true) | |||
} | |||
for ; it.Valid(); it.Next() { | |||
_, m, s, err := db.zDecodeScoreKey(it.Key()) | |||
//may be we will check key equal? | |||
if err != nil { | |||
continue | |||
} | |||
v = append(v, ScorePair{Member: m, Score: s}) | |||
} | |||
it.Close() | |||
if reverse && (offset == 0 && count < 0) { | |||
for i, j := 0, len(v)-1; i < j; i, j = i+1, j-1 { | |||
v[i], v[j] = v[j], v[i] | |||
} | |||
} | |||
return v, nil | |||
} | |||
// zParseLimit converts redis-style [start, stop] rank bounds (negative
// values count from the end of the zset) into an (offset, count) pair for
// iteration. An empty or invalid range is signalled by offset == -1.
func (db *DB) zParseLimit(key []byte, start int, stop int) (offset int, count int, err error) {
	if start < 0 || stop < 0 {
		//refer redis implementation
		var size int64
		size, err = db.ZCard(key)
		if err != nil {
			return
		}
		llen := int(size)
		if start < 0 {
			start = llen + start
		}
		if stop < 0 {
			stop = llen + stop
		}
		if start < 0 {
			start = 0
		}
		if start >= llen {
			// start beyond the last element: empty range.
			offset = -1
			return
		}
	}
	if start > stop {
		offset = -1
		return
	}
	offset = start
	count = (stop - start) + 1
	return
}
func (db *DB) ZClear(key []byte) (int64, error) { | |||
t := db.zsetBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
rmCnt, err := db.zRemRange(t, key, MinScore, MaxScore, 0, -1) | |||
if err == nil { | |||
err = t.Commit() | |||
} | |||
return rmCnt, err | |||
} | |||
func (db *DB) ZMclear(keys ...[]byte) (int64, error) { | |||
t := db.zsetBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
for _, key := range keys { | |||
if _, err := db.zRemRange(t, key, MinScore, MaxScore, 0, -1); err != nil { | |||
return 0, err | |||
} | |||
} | |||
err := t.Commit() | |||
return int64(len(keys)), err | |||
} | |||
func (db *DB) ZRange(key []byte, start int, stop int) ([]ScorePair, error) { | |||
return db.ZRangeGeneric(key, start, stop, false) | |||
} | |||
//min and max must be inclusive | |||
//if no limit, set offset = 0 and count = -1 | |||
func (db *DB) ZRangeByScore(key []byte, min int64, max int64, | |||
offset int, count int) ([]ScorePair, error) { | |||
return db.ZRangeByScoreGeneric(key, min, max, offset, count, false) | |||
} | |||
func (db *DB) ZRank(key []byte, member []byte) (int64, error) { | |||
return db.zrank(key, member, false) | |||
} | |||
func (db *DB) ZRemRangeByRank(key []byte, start int, stop int) (int64, error) { | |||
offset, count, err := db.zParseLimit(key, start, stop) | |||
if err != nil { | |||
return 0, err | |||
} | |||
var rmCnt int64 | |||
t := db.zsetBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
rmCnt, err = db.zRemRange(t, key, MinScore, MaxScore, offset, count) | |||
if err == nil { | |||
err = t.Commit() | |||
} | |||
return rmCnt, err | |||
} | |||
//min and max must be inclusive | |||
func (db *DB) ZRemRangeByScore(key []byte, min int64, max int64) (int64, error) { | |||
t := db.zsetBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
rmCnt, err := db.zRemRange(t, key, min, max, 0, -1) | |||
if err == nil { | |||
err = t.Commit() | |||
} | |||
return rmCnt, err | |||
} | |||
func (db *DB) ZRevRange(key []byte, start int, stop int) ([]ScorePair, error) { | |||
return db.ZRangeGeneric(key, start, stop, true) | |||
} | |||
func (db *DB) ZRevRank(key []byte, member []byte) (int64, error) { | |||
return db.zrank(key, member, true) | |||
} | |||
// ZRevRangeByScore returns members whose score lies in [min, max]
// (both inclusive), in descending score order.
// If no limit is wanted, pass offset = 0 and count = -1.
func (db *DB) ZRevRangeByScore(key []byte, min int64, max int64, offset int, count int) ([]ScorePair, error) {
	return db.ZRangeByScoreGeneric(key, min, max, offset, count, true)
}
func (db *DB) ZRangeGeneric(key []byte, start int, stop int, reverse bool) ([]ScorePair, error) { | |||
offset, count, err := db.zParseLimit(key, start, stop) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return db.zRange(key, MinScore, MaxScore, offset, count, reverse) | |||
} | |||
// ZRangeByScoreGeneric returns members whose score lies in [min, max]
// (both inclusive); reverse selects descending score order.
// If no limit is wanted, pass offset = 0 and count = -1.
func (db *DB) ZRangeByScoreGeneric(key []byte, min int64, max int64,
	offset int, count int, reverse bool) ([]ScorePair, error) {
	return db.zRange(key, min, max, offset, count, reverse)
}
// zFlush drops all zset data via the generic flushType helper, holding
// the zset batch lock for the duration.
func (db *DB) zFlush() (drop int64, err error) {
	t := db.zsetBatch
	t.Lock()
	defer t.Unlock()
	return db.flushType(t, ZSetType)
}
func (db *DB) ZExpire(key []byte, duration int64) (int64, error) { | |||
if duration <= 0 { | |||
return 0, errExpireValue | |||
} | |||
return db.zExpireAt(key, time.Now().Unix()+duration) | |||
} | |||
func (db *DB) ZExpireAt(key []byte, when int64) (int64, error) { | |||
if when <= time.Now().Unix() { | |||
return 0, errExpireValue | |||
} | |||
return db.zExpireAt(key, when) | |||
} | |||
// ZTTL returns the remaining time-to-live of the sorted set at key.
// An invalid key size yields -1 together with the error.
func (db *DB) ZTTL(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return -1, err
	}
	return db.ttl(ZSetType, key)
}
func (db *DB) ZPersist(key []byte) (int64, error) { | |||
if err := checkKeySize(key); err != nil { | |||
return 0, err | |||
} | |||
t := db.zsetBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
n, err := db.rmExpire(t, ZSetType, key) | |||
if err != nil { | |||
return 0, err | |||
} | |||
err = t.Commit() | |||
return n, err | |||
} | |||
func getAggregateFunc(aggregate byte) func(int64, int64) int64 { | |||
switch aggregate { | |||
case AggregateSum: | |||
return func(a int64, b int64) int64 { | |||
return a + b | |||
} | |||
case AggregateMax: | |||
return func(a int64, b int64) int64 { | |||
if a > b { | |||
return a | |||
} | |||
return b | |||
} | |||
case AggregateMin: | |||
return func(a int64, b int64) int64 { | |||
if a > b { | |||
return b | |||
} | |||
return a | |||
} | |||
} | |||
return nil | |||
} | |||
func (db *DB) ZUnionStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) { | |||
var destMap = map[string]int64{} | |||
aggregateFunc := getAggregateFunc(aggregate) | |||
if aggregateFunc == nil { | |||
return 0, errInvalidAggregate | |||
} | |||
if len(srcKeys) < 1 { | |||
return 0, errInvalidSrcKeyNum | |||
} | |||
if weights != nil { | |||
if len(srcKeys) != len(weights) { | |||
return 0, errInvalidWeightNum | |||
} | |||
} else { | |||
weights = make([]int64, len(srcKeys)) | |||
for i := 0; i < len(weights); i++ { | |||
weights[i] = 1 | |||
} | |||
} | |||
for i, key := range srcKeys { | |||
scorePairs, err := db.ZRange(key, 0, -1) | |||
if err != nil { | |||
return 0, err | |||
} | |||
for _, pair := range scorePairs { | |||
if score, ok := destMap[String(pair.Member)]; !ok { | |||
destMap[String(pair.Member)] = pair.Score * weights[i] | |||
} else { | |||
destMap[String(pair.Member)] = aggregateFunc(score, pair.Score*weights[i]) | |||
} | |||
} | |||
} | |||
t := db.zsetBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
db.zDelete(t, destKey) | |||
for member, score := range destMap { | |||
if err := checkZSetKMSize(destKey, []byte(member)); err != nil { | |||
return 0, err | |||
} | |||
if _, err := db.zSetItem(t, destKey, score, []byte(member)); err != nil { | |||
return 0, err | |||
} | |||
} | |||
var num = int64(len(destMap)) | |||
sk := db.zEncodeSizeKey(destKey) | |||
t.Put(sk, PutInt64(num)) | |||
//todo add binlog | |||
if err := t.Commit(); err != nil { | |||
return 0, err | |||
} | |||
return num, nil | |||
} | |||
func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) { | |||
aggregateFunc := getAggregateFunc(aggregate) | |||
if aggregateFunc == nil { | |||
return 0, errInvalidAggregate | |||
} | |||
if len(srcKeys) < 1 { | |||
return 0, errInvalidSrcKeyNum | |||
} | |||
if weights != nil { | |||
if len(srcKeys) != len(weights) { | |||
return 0, errInvalidWeightNum | |||
} | |||
} else { | |||
weights = make([]int64, len(srcKeys)) | |||
for i := 0; i < len(weights); i++ { | |||
weights[i] = 1 | |||
} | |||
} | |||
var destMap = map[string]int64{} | |||
scorePairs, err := db.ZRange(srcKeys[0], 0, -1) | |||
if err != nil { | |||
return 0, err | |||
} | |||
for _, pair := range scorePairs { | |||
destMap[String(pair.Member)] = pair.Score * weights[0] | |||
} | |||
for i, key := range srcKeys[1:] { | |||
scorePairs, err := db.ZRange(key, 0, -1) | |||
if err != nil { | |||
return 0, err | |||
} | |||
tmpMap := map[string]int64{} | |||
for _, pair := range scorePairs { | |||
if score, ok := destMap[String(pair.Member)]; ok { | |||
tmpMap[String(pair.Member)] = aggregateFunc(score, pair.Score*weights[i+1]) | |||
} | |||
} | |||
destMap = tmpMap | |||
} | |||
t := db.zsetBatch | |||
t.Lock() | |||
defer t.Unlock() | |||
db.zDelete(t, destKey) | |||
for member, score := range destMap { | |||
if err := checkZSetKMSize(destKey, []byte(member)); err != nil { | |||
return 0, err | |||
} | |||
if _, err := db.zSetItem(t, destKey, score, []byte(member)); err != nil { | |||
return 0, err | |||
} | |||
} | |||
var num int64 = int64(len(destMap)) | |||
sk := db.zEncodeSizeKey(destKey) | |||
t.Put(sk, PutInt64(num)) | |||
//todo add binlog | |||
if err := t.Commit(); err != nil { | |||
return 0, err | |||
} | |||
return num, nil | |||
} | |||
// ZScan iterates zset keys via the generic scan helper, starting from
// key and returning at most count keys. inclusive controls whether the
// start key itself may be returned; match is a filter pattern applied
// by scan.
func (db *DB) ZScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
	return db.scan(ZSizeType, key, count, inclusive, match)
}
@ -0,0 +1,113 @@ | |||
package nodb | |||
import ( | |||
"errors" | |||
"fmt" | |||
"github.com/lunny/nodb/store" | |||
) | |||
var (
	// ErrNestTx is returned by Begin when a transaction is already open.
	ErrNestTx = errors.New("nest transaction not supported")
	// ErrTxDone is returned by Commit or Rollback once the transaction
	// has already been finished.
	ErrTxDone = errors.New("Transaction has already been committed or rolled back")
)
// Tx is a transactional view of the database. It embeds a *DB so the
// usual commands can be issued against it; all of its batches write
// into the underlying store transaction.
type Tx struct {
	*DB            // per-transaction DB handle built by Begin
	tx *store.Tx   // underlying store transaction; nil once finished
	logs [][]byte  // queued binlog entries, written on Commit
}
// IsTransaction reports whether this DB handle was created by Begin
// and is still inside the transaction.
func (db *DB) IsTransaction() bool {
	return db.status == DBInTransaction
}
// Begin a transaction, it will block all other write operations before calling Commit or Rollback.
// You must be very careful to prevent long-time transaction.
//
// The returned Tx wraps a fresh DB value that shares the parent's
// nodb instance and storage but routes all writes through the
// store-level transaction.
func (db *DB) Begin() (*Tx, error) {
	// Nested transactions are not supported.
	if db.IsTransaction() {
		return nil, ErrNestTx
	}
	tx := new(Tx)
	tx.DB = new(DB)
	tx.DB.l = db.l
	// Hold the global write lock for the whole life of the
	// transaction; it is released by Commit or Rollback.
	tx.l.wLock.Lock()
	tx.DB.sdb = db.sdb
	var err error
	tx.tx, err = db.sdb.Begin()
	if err != nil {
		// Could not open the store transaction: release the write
		// lock so other writers are not blocked forever.
		tx.l.wLock.Unlock()
		return nil, err
	}
	tx.DB.bucket = tx.tx
	tx.DB.status = DBInTransaction
	tx.DB.index = db.index
	// Every data-type batch of the new DB writes into this transaction.
	tx.DB.kvBatch = tx.newBatch()
	tx.DB.listBatch = tx.newBatch()
	tx.DB.hashBatch = tx.newBatch()
	tx.DB.zsetBatch = tx.newBatch()
	tx.DB.binBatch = tx.newBatch()
	tx.DB.setBatch = tx.newBatch()
	return tx, nil
}
// Commit commits the underlying store transaction, writes any queued
// binlog entries, and releases the write lock taken in Begin. It
// returns ErrTxDone when the transaction has already been finished.
func (tx *Tx) Commit() error {
	if tx.tx == nil {
		return ErrTxDone
	}
	// commitLock serializes the store commit and the binlog write so
	// that log order matches commit order across transactions.
	tx.l.commitLock.Lock()
	err := tx.tx.Commit()
	tx.tx = nil
	if len(tx.logs) > 0 {
		// NOTE(review): the result of Log is discarded — presumably
		// binlog writing is best-effort; confirm before relying on it.
		tx.l.binlog.Log(tx.logs...)
	}
	tx.l.commitLock.Unlock()
	tx.l.wLock.Unlock()
	tx.DB.bucket = nil
	return err
}
// Rollback aborts the underlying store transaction and releases the
// write lock taken in Begin. It returns ErrTxDone when the
// transaction has already been finished.
func (tx *Tx) Rollback() error {
	if tx.tx == nil {
		return ErrTxDone
	}
	err := tx.tx.Rollback()
	tx.tx = nil
	tx.l.wLock.Unlock()
	tx.DB.bucket = nil
	return err
}
// newBatch builds a batch whose writes go into this transaction's
// store write batch. A txBatchLocker is used — presumably a no-op
// locker, since the transaction already holds the global write lock.
func (tx *Tx) newBatch() *batch {
	return tx.l.newBatch(tx.tx.NewWriteBatch(), &txBatchLocker{}, tx)
}
func (tx *Tx) Select(index int) error { | |||
if index < 0 || index >= int(MaxDBNumber) { | |||
return fmt.Errorf("invalid db index %d", index) | |||
} | |||
tx.DB.index = uint8(index) | |||
return nil | |||
} |
@ -0,0 +1,113 @@ | |||
package nodb | |||
import ( | |||
"encoding/binary" | |||
"errors" | |||
"reflect" | |||
"strconv" | |||
"unsafe" | |||
) | |||
var errIntNumber = errors.New("invalid integer") | |||
// String reinterprets b as a string without copying by aliasing the
// slice's backing storage. The caller must not mutate b afterwards —
// use at your own risk.
func String(b []byte) (s string) {
	src := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	dst := (*reflect.StringHeader)(unsafe.Pointer(&s))
	dst.Data = src.Data
	dst.Len = src.Len
	return
}
// Slice reinterprets s as a byte slice without copying by aliasing the
// string's backing storage. The result must never be written to —
// use at your own risk.
func Slice(s string) (b []byte) {
	dst := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	src := (*reflect.StringHeader)(unsafe.Pointer(&s))
	dst.Data = src.Data
	dst.Len = src.Len
	dst.Cap = src.Len
	return
}
func Int64(v []byte, err error) (int64, error) { | |||
if err != nil { | |||
return 0, err | |||
} else if v == nil || len(v) == 0 { | |||
return 0, nil | |||
} else if len(v) != 8 { | |||
return 0, errIntNumber | |||
} | |||
return int64(binary.LittleEndian.Uint64(v)), nil | |||
} | |||
// PutInt64 encodes v as 8 little-endian bytes, the inverse of Int64.
// Using encoding/binary instead of unsafely aliasing v's memory keeps
// the encoding little-endian on every platform (the aliasing version
// produced native byte order, disagreeing with Int64 on big-endian
// targets) and avoids returning a slice into a local variable.
func PutInt64(v int64) []byte {
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, uint64(v))
	return b
}
func StrInt64(v []byte, err error) (int64, error) { | |||
if err != nil { | |||
return 0, err | |||
} else if v == nil { | |||
return 0, nil | |||
} else { | |||
return strconv.ParseInt(String(v), 10, 64) | |||
} | |||
} | |||
func StrInt32(v []byte, err error) (int32, error) { | |||
if err != nil { | |||
return 0, err | |||
} else if v == nil { | |||
return 0, nil | |||
} else { | |||
res, err := strconv.ParseInt(String(v), 10, 32) | |||
return int32(res), err | |||
} | |||
} | |||
func StrInt8(v []byte, err error) (int8, error) { | |||
if err != nil { | |||
return 0, err | |||
} else if v == nil { | |||
return 0, nil | |||
} else { | |||
res, err := strconv.ParseInt(String(v), 10, 8) | |||
return int8(res), err | |||
} | |||
} | |||
// StrPutInt64 renders v as its base-10 decimal ASCII bytes, the
// inverse of StrInt64.
func StrPutInt64(v int64) []byte {
	return strconv.AppendInt(nil, v, 10)
}
// MinUInt32 returns the smaller of a and b.
func MinUInt32(a uint32, b uint32) uint32 {
	// Early return instead of else-after-return.
	if a < b {
		return a
	}
	return b
}
// MaxUInt32 returns the larger of a and b.
func MaxUInt32(a uint32, b uint32) uint32 {
	// Early return instead of else-after-return.
	if a > b {
		return a
	}
	return b
}
// MaxInt32 returns the larger of a and b.
func MaxInt32(a int32, b int32) int32 {
	// Early return instead of else-after-return.
	if a > b {
		return a
	}
	return b
}
@ -0,0 +1,23 @@ | |||
Copyright (c) 2015, Dave Cheney <dave@cheney.net> | |||
All rights reserved. | |||
Redistribution and use in source and binary forms, with or without | |||
modification, are permitted provided that the following conditions are met: | |||
* Redistributions of source code must retain the above copyright notice, this | |||
list of conditions and the following disclaimer. | |||
* Redistributions in binary form must reproduce the above copyright notice, | |||
this list of conditions and the following disclaimer in the documentation | |||
and/or other materials provided with the distribution. | |||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE | |||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | |||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | |||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | |||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
@ -0,0 +1,282 @@ | |||
// Package errors provides simple error handling primitives. | |||
// | |||
// The traditional error handling idiom in Go is roughly akin to | |||
// | |||
// if err != nil { | |||
// return err | |||
// } | |||
// | |||
// which when applied recursively up the call stack results in error reports | |||
// without context or debugging information. The errors package allows | |||
// programmers to add context to the failure path in their code in a way | |||
// that does not destroy the original value of the error. | |||
// | |||
// Adding context to an error | |||
// | |||
// The errors.Wrap function returns a new error that adds context to the | |||
// original error by recording a stack trace at the point Wrap is called, | |||
// together with the supplied message. For example | |||
// | |||
// _, err := ioutil.ReadAll(r) | |||
// if err != nil { | |||
// return errors.Wrap(err, "read failed") | |||
// } | |||
// | |||
// If additional control is required, the errors.WithStack and | |||
// errors.WithMessage functions destructure errors.Wrap into its component | |||
// operations: annotating an error with a stack trace and with a message, | |||
// respectively. | |||
// | |||
// Retrieving the cause of an error | |||
// | |||
// Using errors.Wrap constructs a stack of errors, adding context to the | |||
// preceding error. Depending on the nature of the error it may be necessary | |||
// to reverse the operation of errors.Wrap to retrieve the original error | |||
// for inspection. Any error value which implements this interface | |||
// | |||
// type causer interface { | |||
// Cause() error | |||
// } | |||
// | |||
// can be inspected by errors.Cause. errors.Cause will recursively retrieve | |||
// the topmost error that does not implement causer, which is assumed to be | |||
// the original cause. For example: | |||
// | |||
// switch err := errors.Cause(err).(type) { | |||
// case *MyError: | |||
// // handle specifically | |||
// default: | |||
// // unknown error | |||
// } | |||
// | |||
// Although the causer interface is not exported by this package, it is | |||
// considered a part of its stable public interface. | |||
// | |||
// Formatted printing of errors | |||
// | |||
// All error values returned from this package implement fmt.Formatter and can | |||
// be formatted by the fmt package. The following verbs are supported: | |||
// | |||
// %s print the error. If the error has a Cause it will be | |||
// printed recursively. | |||
// %v see %s | |||
// %+v extended format. Each Frame of the error's StackTrace will | |||
// be printed in detail. | |||
// | |||
// Retrieving the stack trace of an error or wrapper | |||
// | |||
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are | |||
// invoked. This information can be retrieved with the following interface: | |||
// | |||
// type stackTracer interface { | |||
// StackTrace() errors.StackTrace | |||
// } | |||
// | |||
// The returned errors.StackTrace type is defined as | |||
// | |||
// type StackTrace []Frame | |||
// | |||
// The Frame type represents a call site in the stack trace. Frame supports | |||
// the fmt.Formatter interface that can be used for printing information about | |||
// the stack trace of this error. For example: | |||
// | |||
// if err, ok := err.(stackTracer); ok { | |||
// for _, f := range err.StackTrace() { | |||
// fmt.Printf("%+s:%d", f) | |||
// } | |||
// } | |||
// | |||
// Although the stackTracer interface is not exported by this package, it is | |||
// considered a part of its stable public interface. | |||
// | |||
// See the documentation for Frame.Format for more details. | |||
package errors | |||
import ( | |||
"fmt" | |||
"io" | |||
) | |||
// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
	return &fundamental{
		msg:   message,
		stack: callers(), // capture the stack at the construction site
	}
}
// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
	return &fundamental{
		msg:   fmt.Sprintf(format, args...),
		stack: callers(), // capture the stack at the construction site
	}
}
// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
	msg string // the error message reported by Error
	*stack     // call stack recorded at construction
}

// Error returns the message the error was created with.
func (f *fundamental) Error() string { return f.msg }
// Format implements fmt.Formatter. %s and %v print the message; %+v
// additionally prints the recorded stack trace; %q prints the message
// quoted.
func (f *fundamental) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			io.WriteString(s, f.msg)
			f.stack.Format(s, verb)
			return
		}
		fallthrough // plain %v behaves like %s
	case 's':
		io.WriteString(s, f.msg)
	case 'q':
		fmt.Fprintf(s, "%q", f.msg)
	}
}
// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
func WithStack(err error) error {
	if err == nil {
		return nil
	}
	return &withStack{
		err,
		callers(), // capture the stack at the annotation site
	}
}
// withStack pairs a wrapped error with the call stack recorded at the
// point of wrapping.
type withStack struct {
	error  // the wrapped error; its Error() is promoted
	*stack // call stack recorded at wrap time
}

// Cause returns the wrapped error.
func (w *withStack) Cause() error { return w.error }
// Format implements fmt.Formatter. %+v prints the cause recursively
// followed by this wrapper's stack trace; %s/%v print the error text;
// %q prints it quoted.
func (w *withStack) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v", w.Cause())
			w.stack.Format(s, verb)
			return
		}
		fallthrough // plain %v behaves like %s
	case 's':
		io.WriteString(s, w.Error())
	case 'q':
		fmt.Fprintf(s, "%q", w.Error())
	}
}
// Wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
	if err == nil {
		return nil
	}
	// Layer the message first, then the stack, so %+v prints the
	// cause, the message, and finally the stack.
	err = &withMessage{
		cause: err,
		msg:   message,
	}
	return &withStack{
		err,
		callers(),
	}
}
// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is called, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
	if err == nil {
		return nil
	}
	// Same layering as Wrap: message wrapper inside, stack outside.
	err = &withMessage{
		cause: err,
		msg:   fmt.Sprintf(format, args...),
	}
	return &withStack{
		err,
		callers(),
	}
}
// WithMessage annotates err with a new message.
// If err is nil, WithMessage returns nil.
// Unlike Wrap, no stack trace is recorded.
func WithMessage(err error, message string) error {
	if err == nil {
		return nil
	}
	return &withMessage{
		cause: err,
		msg:   message,
	}
}
// WithMessagef annotates err with the format specifier.
// If err is nil, WithMessagef returns nil.
// Unlike Wrapf, no stack trace is recorded.
func WithMessagef(err error, format string, args ...interface{}) error {
	if err == nil {
		return nil
	}
	return &withMessage{
		cause: err,
		msg:   fmt.Sprintf(format, args...),
	}
}
// withMessage annotates a cause error with a message; it records no
// stack of its own.
type withMessage struct {
	cause error  // the wrapped error
	msg   string // message prepended to the cause's text
}

// Error renders as "<msg>: <cause>".
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }

// Cause returns the wrapped error.
func (w *withMessage) Cause() error { return w.cause }
// Format implements fmt.Formatter. %+v prints the cause recursively,
// then this wrapper's message on a new line; %s/%v/%q print the
// combined error text.
func (w *withMessage) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v\n", w.Cause())
			io.WriteString(s, w.msg)
			return
		}
		fallthrough // plain %v behaves like %s
	case 's', 'q':
		io.WriteString(s, w.Error())
	}
}
// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
//     type causer interface {
//            Cause() error
//     }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
	type causer interface {
		Cause() error
	}

	// Walk the chain until we hit a nil error or a value that does
	// not expose a cause.
	for err != nil {
		c, ok := err.(causer)
		if !ok {
			return err
		}
		err = c.Cause()
	}
	return err
}
@ -0,0 +1,147 @@ | |||
package errors | |||
import ( | |||
"fmt" | |||
"io" | |||
"path" | |||
"runtime" | |||
"strings" | |||
) | |||
// Frame represents a program counter inside a stack frame.
type Frame uintptr

// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }

// file returns the full path to the source file for this Frame's pc,
// or "unknown" when the pc cannot be resolved.
func (f Frame) file() string {
	if fn := runtime.FuncForPC(f.pc()); fn != nil {
		name, _ := fn.FileLine(f.pc())
		return name
	}
	return "unknown"
}

// line returns the source line number for this Frame's pc, or 0 when
// the pc cannot be resolved.
func (f Frame) line() int {
	if fn := runtime.FuncForPC(f.pc()); fn != nil {
		_, ln := fn.FileLine(f.pc())
		return ln
	}
	return 0
}
// Format formats the frame according to the fmt.Formatter interface.
//
//    %s    source file
//    %d    source line
//    %n    function name
//    %v    equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//    %+s   function name and path of source file relative to the compile time
//          GOPATH separated by \n\t (<funcname>\n\t<path>)
//    %+v   equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
	switch verb {
	case 's':
		switch {
		case s.Flag('+'):
			// %+s: full function name, then the full file path.
			pc := f.pc()
			fn := runtime.FuncForPC(pc)
			if fn == nil {
				io.WriteString(s, "unknown")
			} else {
				file, _ := fn.FileLine(pc)
				fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
			}
		default:
			// %s: just the file's base name.
			io.WriteString(s, path.Base(f.file()))
		}
	case 'd':
		fmt.Fprintf(s, "%d", f.line())
	case 'n':
		name := runtime.FuncForPC(f.pc()).Name()
		io.WriteString(s, funcname(name))
	case 'v':
		// %v is defined as %s:%d, so delegate to those verbs.
		f.Format(s, 's')
		io.WriteString(s, ":")
		f.Format(s, 'd')
	}
}
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame

// Format formats the stack of Frames according to the fmt.Formatter interface.
//
//    %s	lists source files for each Frame in the stack
//    %v	lists the source file and line number for each Frame in the stack
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//    %+v   Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case s.Flag('+'):
			// One frame per line, in %+v (detailed) form.
			for _, f := range st {
				fmt.Fprintf(s, "\n%+v", f)
			}
		case s.Flag('#'):
			fmt.Fprintf(s, "%#v", []Frame(st))
		default:
			fmt.Fprintf(s, "%v", []Frame(st))
		}
	case 's':
		fmt.Fprintf(s, "%s", []Frame(st))
	}
}
// stack represents a stack of program counters.
type stack []uintptr

// Format prints each program counter as a detailed Frame on its own
// line for %+v; other verbs and flags produce no output.
func (s *stack) Format(st fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case st.Flag('+'):
			for _, pc := range *s {
				f := Frame(pc)
				fmt.Fprintf(st, "\n%+v", f)
			}
		}
	}
}
func (s *stack) StackTrace() StackTrace { | |||
f := make([]Frame, len(*s)) | |||
for i := 0; i < len(f); i++ { | |||
f[i] = Frame((*s)[i]) | |||
} | |||
return f | |||
} | |||
// callers records the current goroutine's program counters, capped at
// 32 frames. The skip count of 3 omits runtime.Callers, callers
// itself, and the exported constructor that called it — do not add or
// remove call layers without adjusting it.
func callers() *stack {
	const depth = 32
	var pcs [depth]uintptr
	n := runtime.Callers(3, pcs[:])
	var st stack = pcs[0:n]
	return &st
}
// funcname removes the path prefix component of a function's name
// reported by func.Name(): everything up to the last "/" and then up
// to the first "." is stripped.
func funcname(name string) string {
	// Both index calls return -1 when the separator is absent, in
	// which case slicing from -1+1 == 0 leaves the string unchanged.
	name = name[strings.LastIndex(name, "/")+1:]
	return name[strings.Index(name, ".")+1:]
}
@ -0,0 +1,12 @@ | |||
# This is the official list of Snappy-Go authors for copyright purposes. | |||
# This file is distinct from the CONTRIBUTORS files. | |||
# See the latter for an explanation. | |||
# Names should be added to this file as | |||
# Name or Organization <email address> | |||
# The email address is not required for organizations. | |||
# Please keep the list sorted. | |||
Google Inc. | |||
Jan Mercl <0xjnml@gmail.com> |