A Simple Caching Pattern in Go

One might use it like this:

var myCache = NewOurCache(0, calculateTimestamp)
// ...
var myValue time.Time = myCache.Open(dateText, timeText)

OurCacheType is an alias for the actual data cached. It can be any type, including a structure or pointer. composeKey() is an example of how to consistently create a string key from whatever identifies the data. These parameters usually match the parameters of the createAValue function (if used).

Use the cache by calling Write() for values that Read() cannot find, or you can use something like the Open() function, which returns the cached value if found in the cache, or else generates the value (using the createAValue function), caches it, and returns it.¹

If a cache is disabled, Read() always returns found == false (even if it has the desired key already in data) and Write() does nothing. Open() will call the generation function every time.

import (
    "sync"
    "sync/atomic"
)

// OurCacheType is the type of value the cache holds. It is not
// necessarily an int; it can be any type, e.g. structures or pointers.
type OurCacheType = int

// ourCacheData maps a composed string key to its cached value.
type ourCacheData map[string]OurCacheType

// getItemFunc is the function that does the work: it produces the value
// for a pair of key parts. Open() calls it on a cache miss.
type getItemFunc func(partOfKey, otherPartOfKey string) OurCacheType

// OurCache is a concurrency-safe cache with an optional capacity bound
// and hit/miss statistics.
type OurCache struct {
    capacity          int           // flush threshold for checkCapacity(); <= 0 means unlimited
    data              ourCacheData  // the cached values, keyed by strings like composeKey() produces
    createAValue      getItemFunc   // called by Open() to generate a value on a cache miss
    hitCount          int32         // Read() successes; updated with sync/atomic
    missCount         int32         // Read() misses; updated with sync/atomic
    overCapacityCount int32         // times checkCapacity() discarded the data; updated with sync/atomic
    semaphore         *sync.RWMutex // guards data and enabled
    enabled           bool          // when false, Read() reports not-found and Write() is a no-op
}

// ourCacheDatum is one key/value pair, used to pre-load a cache through
// the variadic argument of NewOurCache (useful for testing).
type ourCacheDatum struct {
    key   string
    value OurCacheType
}

// NewOurCache correctly initializes a cache instance. capacity <= 0
// means unlimited; createFunc is invoked by Open() on a cache miss; the
// optional data pre-loads the cache (useful for testing).
func NewOurCache(capacity int, createFunc getItemFunc,
    data ...ourCacheDatum) OurCache {

    result := OurCache{capacity: capacity, createAValue: createFunc,
        semaphore: &sync.RWMutex{}, data: ourCacheData{},
        enabled: true} // or start it disabled (e.g. to test without cached values)

    // pre-size the map when a positive capacity is known, avoiding
    // rehashing as the cache fills
    if capacity > 0 {
        result.data = make(ourCacheData, capacity)
    }

    // initializing the cache is useful for testing
    for _, datum := range data {
        result.data[datum.key] = datum.value
    }
    return result
}

// Open returns the cached value for the composed key if present;
// otherwise it generates the value with createAValue, caches it, and
// returns it. On a disabled cache this regenerates the value each call.
func (cache *OurCache) Open(partOfKey, otherPartOfKey string,
) OurCacheType {
    key := composeKey(partOfKey, otherPartOfKey)
    if cached, ok := cache.Read(key); ok {
        return cached
    }
    fresh := cache.createAValue(partOfKey, otherPartOfKey)
    cache.Write(key, fresh)
    return fresh
}

// Read looks up key under the read lock. It reports found == false when
// the key is absent or the cache is disabled, and maintains the hit/miss
// counters (atomically, since the accessors read them without the lock).
func (cache *OurCache) Read(key string) (result OurCacheType, found bool) {
    cache.semaphore.RLock()
    defer cache.semaphore.RUnlock()

    if cache.enabled {
        result, found = cache.data[key]
        if found {
            atomic.AddInt32(&cache.hitCount, 1)
            return result, true
        }
        atomic.AddInt32(&cache.missCount, 1)
    }
    // disabled caches neither consult data nor count a miss
    return result, false
}

// Write stores value under key, flushing the cache first if it has
// reached capacity. On a disabled cache this is a no-op.
func (cache *OurCache) Write(key string, value OurCacheType) {
    cache.semaphore.Lock()
    defer cache.semaphore.Unlock()

    if !cache.enabled {
        return
    }
    cache.checkCapacity()
    cache.data[key] = value
}

// checkCapacity discards all cached data once the entry count reaches
// capacity (a simple flush-everything eviction policy) and bumps the
// over-capacity counter. Called from Write(), which holds the write lock.
func (cache *OurCache) checkCapacity() {
    if cache.capacity <= 0 {
        return // unlimited
    }
    if len(cache.data) >= cache.capacity {
        cache.data = make(ourCacheData)
        atomic.AddInt32(&cache.overCapacityCount, 1)
    }
}

// Size reports the current number of cached entries.
func (cache *OurCache) Size() int {
    cache.semaphore.RLock()
    defer cache.semaphore.RUnlock()

    return len(cache.data)
}

// HitCount reports how many Read() calls found their key.
func (cache *OurCache) HitCount() int {
    return int(atomic.LoadInt32(&cache.hitCount))
}

// MissCount reports how many Read() calls (on an enabled cache) missed.
func (cache *OurCache) MissCount() int {
    return int(atomic.LoadInt32(&cache.missCount))
}

// OverCapacityCount reports how many times the cache was flushed for
// reaching its capacity.
func (cache *OurCache) OverCapacityCount() int {
    return int(atomic.LoadInt32(&cache.overCapacityCount))
}

// Abled reports whether the cache is currently enabled.
func (cache *OurCache) Abled() bool {
    cache.semaphore.RLock()
    isOn := cache.enabled
    cache.semaphore.RUnlock()
    return isOn
}

func (cache *OurCache) Enable) {
    cache.semaphore.Lock()
    defer cache.semaphore.Unlock()

    cache.enabled = true
}

// Disable turns the cache off: Read() reports not-found and Write()
// does nothing until Enable() is called.
func (cache *OurCache) Disable() {
    cache.semaphore.Lock()
    cache.enabled = false
    cache.semaphore.Unlock()
}

// Clear discards all cached entries; use it when the source of the data
// changes. Statistics counters are untouched (see Reset).
func (cache *OurCache) Clear() {
    cache.semaphore.Lock()
    cache.data = make(ourCacheData)
    cache.semaphore.Unlock()
}

// Reset discards all cached entries and zeroes the statistics counters.
func (cache *OurCache) Reset() {
    cache.semaphore.Lock()
    defer cache.semaphore.Unlock()

    cache.data = ourCacheData{}
    // The counter accessors (HitCount etc.) read these fields with
    // atomic.LoadInt32 without taking the mutex, so plain assignments
    // here would be a data race; store atomically instead.
    atomic.StoreInt32(&cache.hitCount, 0)
    atomic.StoreInt32(&cache.missCount, 0)
    atomic.StoreInt32(&cache.overCapacityCount, 0)
}

// Statistics returns a consistent snapshot of the cache state. It reads
// the fields directly instead of calling Size()/Abled(): those methods
// take the read lock themselves, and re-acquiring a sync.RWMutex read
// lock while already holding it can deadlock when a writer is queued in
// between (RWMutex is not reentrant).
func (cache *OurCache) Statistics() (size, capacity, hit, miss, over int, enabled bool) {
    cache.semaphore.RLock()
    defer cache.semaphore.RUnlock()

    return len(cache.data),
        cache.capacity,
        int(atomic.LoadInt32(&cache.hitCount)),
        int(atomic.LoadInt32(&cache.missCount)),
        int(atomic.LoadInt32(&cache.overCapacityCount)),
        cache.enabled
}

// composeKey is an example of a deterministic way of generating a
// unique string key for a cachable value (e.g. a consistent string
// representation of a Time): the two identifying parts joined by "|".
func composeKey(partOfKey, otherPartOfKey string) string {
    const separator = "|"
    return partOfKey + separator + otherPartOfKey
}

A generic version is possible, but it requires a lot of sacrifices, like exposing and delegating the format of the key strings to the caller and losing the variadic parameters in NewOurCache(). I could not figure out a way to define a function type that returns the type of the cache, but that critical piece might be possible.

One hundred lines of code is a small price to pay for type safety


  1. My naming convention is:
    find("key") returns the value and a boolean (like aMap["key"])
    get("key") excepts/panics if it cannot return the value
    open("key") returns an existing instance or creates the value 

 
2
Kudos
 
2
Kudos

Now read this

Walk, Don’t Race

TL;DR # Cha-Cha-Changes # Breaking changes happen, and the breaking occurs between the layers (e.g. UI and API, API and database). Rather than trying to release new layers simultaneously (especially with edge distribution), commit the... Continue →