Use LRU for client-side cache

This commit is contained in:
Tijl 2025-08-07 13:57:10 +02:00
parent 6f7c7c5ea7
commit 2377fb191a
Signed by: tijl
GPG Key ID: DAE24BFCD722F053
3 changed files with 20 additions and 22 deletions

1
go.mod
View File

@ -6,6 +6,7 @@ require (
github.com/gofiber/fiber/v2 v2.52.9
github.com/hashicorp/golang-lru v1.0.2
github.com/marcboeker/go-duckdb/v2 v2.3.4
github.com/orcaman/concurrent-map/v2 v2.0.1
go.etcd.io/bbolt v1.4.2
)

2
go.sum
View File

@ -57,6 +57,8 @@ github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpsp
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=

View File

@ -4,10 +4,10 @@ import (
"encoding/binary"
"fmt"
"net/http"
"sync"
"time"
"git.tijl.dev/tijl/shortify/pkg/generation"
lru "github.com/hashicorp/golang-lru"
bolt "go.etcd.io/bbolt"
)
@ -22,10 +22,9 @@ type Client struct {
stopRetry chan struct{}
// In-memory cache
cacheMap map[string]string // longURL -> shortID
cacheLock sync.RWMutex
maxCacheSize int
cacheCount int
cacheMap *lru.Cache
maxCacheSize int
maxCacheInitialLoad int
}
// NewClient with persistence and retry queue
@ -48,9 +47,12 @@ func NewClient(serverURL string, folder string) (*Client, error) {
stopRetry: make(chan struct{}),
}
cli.cacheMap = make(map[string]string)
cli.maxCacheSize = 10000 // or make this configurable
cli.cacheCount = 0
cli.cacheMap, err = lru.New(cli.maxCacheSize)
if err != nil {
return nil, err
}
cli.maxCacheSize = 100000 // or make this configurable
cli.maxCacheInitialLoad = 10000
// Create buckets if not exist
err = db.Update(func(tx *bolt.Tx) error {
@ -118,9 +120,10 @@ func NewClient(serverURL string, folder string) (*Client, error) {
return nil
}
c := b.Cursor()
for k, v := c.First(); k != nil && cli.cacheCount < cli.maxCacheSize; k, v = c.Next() {
cli.cacheMap[string(k)] = string(v)
cli.cacheCount++
initalCounter := 0
for k, v := c.First(); k != nil && initalCounter < cli.maxCacheInitialLoad; k, v = c.Next() {
cli.cacheMap.Add(string(k), string(v))
initalCounter++
}
return nil
})
@ -158,12 +161,9 @@ func (c *Client) Shorten(longURL string, opts ...ShortenOpt) string {
// Check memory cache
if options.useCache {
c.cacheLock.RLock()
if shortID, ok := c.cacheMap[longURL]; ok {
c.cacheLock.RUnlock()
return shortID
if shortID, ok := c.cacheMap.Get(longURL); ok {
return shortID.(string)
}
c.cacheLock.RUnlock()
}
// Generate new ID
@ -184,12 +184,7 @@ func (c *Client) Shorten(longURL string, opts ...ShortenOpt) string {
}
func (c *Client) addToCache(longURL, shortID string) {
c.cacheLock.Lock()
if _, exists := c.cacheMap[longURL]; !exists && c.cacheCount < c.maxCacheSize {
c.cacheMap[longURL] = shortID
c.cacheCount++
}
c.cacheLock.Unlock()
c.cacheMap.Add(longURL, shortID)
// Async write to BoltDB
go func() {