This commit is contained in:
2025-08-06 17:38:23 +02:00
parent 4f291c93e5
commit 3892d966f3
17 changed files with 831 additions and 490 deletions

135
pkg/client/client.go Normal file
View File

@@ -0,0 +1,135 @@
package client
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"log"
"net/http"
"time"
"git.tijl.dev/tijl/shortify/pkg/generation"
bolt "go.etcd.io/bbolt"
)
// Client is a shortener client that allocates short IDs locally (using a
// server-assigned prefix) and delivers id→url mappings to the central
// server asynchronously, persisting undelivered jobs in a local bolt db.
type Client struct {
	serverURL  string                // base URL for API requests (placeholder host for unix sockets)
	httpClient *http.Client          // transport created by createHTTPClient
	prefix     uint16                // 2-byte prefix assigned by the central server
	gen        *generation.Generator // local short-ID generator seeded with prefix
	domain     string                // public domain prepended to short IDs, e.g. https://sho.rt
	db         *bolt.DB              // persists the prefix and the retry queue across restarts
	retryQueue chan shortenJob       // in-memory queue of jobs awaiting (re)delivery
	stopRetry  chan struct{}         // closed by Close to stop retryWorker
}
// NewClient with persistence and retry queue
// NewClient opens (or creates) the local bolt database, ensures the
// required buckets exist, loads this client's prefix (registering a new
// one with the server if none is stored), and starts the retry machinery.
// serverURL may be an http(s) URL or unix://<socket-path>; domain is the
// public base URL returned to callers of Shorten.
func NewClient(serverURL, domain string) (*Client, error) {
	httpClient, baseURL, err := createHTTPClient(serverURL)
	if err != nil {
		return nil, err
	}
	// 1s timeout avoids hanging forever if another process holds the file lock.
	db, err := bolt.Open(dbFileName, 0600, &bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		return nil, err
	}
	cli := &Client{
		serverURL:  baseURL,
		httpClient: httpClient,
		domain:     domain,
		db:         db,
		retryQueue: make(chan shortenJob, 1000),
		stopRetry:  make(chan struct{}),
	}
	// Create buckets if not exist
	err = db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte(bucketPrefix))
		if err != nil {
			return err
		}
		_, err = tx.CreateBucketIfNotExists([]byte(bucketRetryJobs))
		return err
	})
	if err != nil {
		return nil, err
	}
	// Load or get prefix
	var prefix uint16
	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(bucketPrefix))
		v := b.Get([]byte("prefix"))
		if v == nil {
			// No stored prefix yet; leave prefix at 0 to trigger registration below.
			return nil
		}
		if len(v) != 2 {
			return fmt.Errorf("invalid prefix length in db")
		}
		prefix = binary.BigEndian.Uint16(v)
		return nil
	})
	if err != nil {
		return nil, err
	}
	// If prefix not found, register new one and save it.
	// NOTE(review): prefix 0 doubles as the "unregistered" sentinel — this
	// assumes the server never hands out prefix 0; confirm server-side.
	if prefix == 0 {
		prefix, err = cli.registerPrefix()
		if err != nil {
			return nil, err
		}
		err = db.Update(func(tx *bolt.Tx) error {
			b := tx.Bucket([]byte(bucketPrefix))
			buf := make([]byte, 2)
			binary.BigEndian.PutUint16(buf, prefix)
			return b.Put([]byte("prefix"), buf)
		})
		if err != nil {
			return nil, err
		}
	}
	cli.prefix = prefix
	cli.gen = generation.NewGenerator(prefix)
	// Load retry jobs from DB into channel (async, so startup isn't blocked).
	go cli.loadRetryJobs()
	// Start retry worker; it runs until Close closes stopRetry.
	go cli.retryWorker()
	return cli, nil
}
// Shorten creates a short URL and sends it async to the central server
func (c *Client) Shorten(longURL string) string {
shortID := c.gen.NextID()
go func() {
payload := map[string]string{
"id": shortID,
"url": longURL,
}
data, _ := json.Marshal(payload)
req, err := http.NewRequest("POST", fmt.Sprintf("%s/shorten", c.serverURL), bytes.NewReader(data))
if err != nil {
log.Println("shorten request build error:", err)
return
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.httpClient.Do(req)
if err != nil {
log.Println("shorten request failed:", err)
return
}
defer resp.Body.Close()
}()
return fmt.Sprintf("%s/%s", c.domain, shortID)
}

138
pkg/client/store.go Normal file
View File

@@ -0,0 +1,138 @@
package client
import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"time"

	bolt "go.etcd.io/bbolt"
)
const (
	bucketPrefix    = "prefix"           // bolt bucket holding this client's assigned prefix
	bucketRetryJobs = "retry_queue"      // bolt bucket persisting undelivered shorten jobs
	dbFileName      = "shorty_client.db" // local bolt database file, created in the working directory
)
// shortenJob is a single id→url mapping waiting to be delivered to the
// central server. It is persisted as JSON in bucketRetryJobs, keyed by ID.
type shortenJob struct {
	ID  string `json:"id"`
	URL string `json:"url"`
}
// registerPrefix asks the central server for a fresh 2-byte client prefix.
func (c *Client) registerPrefix() (uint16, error) {
	resp, err := c.httpClient.Post(fmt.Sprintf("%s/register", c.serverURL), "application/json", nil)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	// BUG FIX: the body was previously decoded without checking the status
	// code, so an error page could silently decode as prefix 0.
	if resp.StatusCode >= 300 {
		return 0, fmt.Errorf("register returned status %d", resp.StatusCode)
	}
	var result struct {
		Prefix uint16 `json:"prefix"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return 0, err
	}
	return result.Prefix, nil
}
// loadRetryJobs replays persisted jobs from the retry bucket into the
// in-memory retryQueue at startup. Jobs that don't fit in the channel
// stay in the db and are simply not retried until a future restart.
func (c *Client) loadRetryJobs() {
	// BUG FIX: errors from View/ForEach/Unmarshal were silently dropped;
	// they are now logged so stuck retry state is visible.
	err := c.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(bucketRetryJobs))
		return b.ForEach(func(k, v []byte) error {
			var job shortenJob
			if err := json.Unmarshal(v, &job); err != nil {
				log.Println("skipping corrupt retry job:", string(k), err)
				return nil // keep scanning the rest of the bucket
			}
			select {
			case c.retryQueue <- job:
			default:
				// Channel full; the job remains persisted in the db.
				log.Println("retry queue full, job stays in db:", job.ID)
			}
			return nil
		})
	})
	if err != nil {
		log.Println("loading retry jobs:", err)
	}
}
// retryWorker drains retryQueue, attempting delivery of each job. On
// success the persisted copy is removed; on failure the job is
// re-enqueued after a short delay. The worker exits when stopRetry closes.
func (c *Client) retryWorker() {
	for {
		select {
		case job := <-c.retryQueue:
			if err := c.sendShortenJob(job); err != nil {
				// BUG FIX: the delay goroutine previously ignored stopRetry,
				// leaking and potentially writing to the db after Close.
				go func(j shortenJob) {
					select {
					case <-time.After(2 * time.Second):
						c.enqueueJob(j)
					case <-c.stopRetry:
						// Shutting down; the job's persisted copy (if the
						// earlier Put succeeded) is retried on next start.
					}
				}(job)
			} else {
				c.deleteJobFromDB(job)
			}
		case <-c.stopRetry:
			return
		}
	}
}
// enqueueJob persists a job (keyed by its short ID) and then offers it to
// the in-memory retry queue without ever blocking the caller.
func (c *Client) enqueueJob(job shortenJob) {
	// Persist first so the job survives a crash before delivery.
	if dbErr := c.db.Update(func(tx *bolt.Tx) error {
		encoded, mErr := json.Marshal(job)
		if mErr != nil {
			return mErr
		}
		return tx.Bucket([]byte(bucketRetryJobs)).Put([]byte(job.ID), encoded)
	}); dbErr != nil {
		log.Println("Failed to store job in db:", dbErr)
	}
	// Non-blocking hand-off to the retry worker.
	select {
	case c.retryQueue <- job:
	default:
		log.Println("Retry queue full, dropping job:", job.ID)
	}
}
// deleteJobFromDB removes the persisted copy of a successfully
// delivered job; a failure to delete is logged but otherwise ignored.
func (c *Client) deleteJobFromDB(job shortenJob) {
	delErr := c.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(bucketRetryJobs))
		return bucket.Delete([]byte(job.ID))
	})
	if delErr != nil {
		log.Println("Failed to delete job from db:", delErr)
	}
}
// sendShortenJob POSTs one id→url mapping to the server's /shorten
// endpoint, returning an error for transport failures or non-2xx replies.
func (c *Client) sendShortenJob(job shortenJob) error {
	// shortenJob's json tags produce the same {"id","url"} payload the
	// hand-built map did; BUG FIX: the marshal error is no longer discarded.
	data, err := json.Marshal(job)
	if err != nil {
		return fmt.Errorf("encoding job %s: %w", job.ID, err)
	}
	req, err := http.NewRequest("POST", fmt.Sprintf("%s/shorten", c.serverURL), bytes.NewReader(data))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Drain the body (best-effort) so the transport can reuse the connection.
	_, _ = io.Copy(io.Discard, resp.Body)
	if resp.StatusCode >= 300 {
		return fmt.Errorf("server returned status %d", resp.StatusCode)
	}
	return nil
}
// Close stops the retry worker and closes the local database.
// NOTE(review): background re-enqueue goroutines spawned by retryWorker
// do not observe stopRetry and may write to the closed db (the error is
// only logged); calling Close twice panics on the double channel close —
// confirm single-call usage.
func (c *Client) Close() error {
	close(c.stopRetry)
	return c.db.Close()
}

42
pkg/client/transport.go Normal file
View File

@@ -0,0 +1,42 @@
package client
import (
"context"
"net"
"net/http"
"net/url"
"strings"
"time"
)
// createHTTPClient creates an http.Client with support for HTTP or unix socket transport
func createHTTPClient(serverURL string) (*http.Client, string, error) {
if strings.HasPrefix(serverURL, "unix://") {
// Extract socket path and strip the scheme
socketPath := strings.TrimPrefix(serverURL, "unix://")
// We'll fake a URL host for use in requests
baseURL := "http://unix"
transport := &http.Transport{
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", socketPath)
},
}
return &http.Client{
Transport: transport,
Timeout: 2 * time.Second,
}, baseURL, nil
}
// Default HTTP transport
parsed, err := url.Parse(serverURL)
if err != nil {
return nil, "", err
}
return &http.Client{
Timeout: 2 * time.Second,
}, parsed.String(), nil
}

62
pkg/generation/base62.go Normal file
View File

@@ -0,0 +1,62 @@
package generation
import (
"errors"
)
// alphabet is the base62 digit set: digits, then uppercase, then
// lowercase, so digit value equals index into this string.
const alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

var (
	base = len(alphabet) // 62
	rev  [256]int        // byte → digit value, -1 for invalid (filled in init)
)
// init builds the reverse lookup table: every byte is marked invalid
// (-1), then each alphabet character records its digit value so decoding
// is a single table lookup.
func init() {
	for i := 0; i < len(rev); i++ {
		rev[i] = -1
	}
	for idx := 0; idx < len(alphabet); idx++ {
		rev[alphabet[idx]] = idx
	}
}
// EncodeBase62 encodes a byte slice into a base62 string
func EncodeBase62(b []byte) string {
var n uint64
for i := 0; i < len(b); i++ {
n = n<<8 + uint64(b[i])
}
if n == 0 {
return string(alphabet[0])
}
var s []byte
for n > 0 {
r := n % uint64(base)
s = append([]byte{alphabet[r]}, s...)
n = n / uint64(base)
}
return string(s)
}
// DecodeBase62 decodes a base62 string into a byte slice
func DecodeBase62(s string) ([]byte, error) {
var n uint64
for i := 0; i < len(s); i++ {
c := s[i]
val := rev[c]
if val == -1 {
return nil, errors.New("invalid base62 character")
}
n = n*uint64(base) + uint64(val)
}
// Recover byte slice from integer
b := make([]byte, rawIDLength)
for i := rawIDLength - 1; i >= 0; i-- {
b[i] = byte(n & 0xff)
n >>= 8
}
return b, nil
}

View File

@@ -0,0 +1,78 @@
package generation
import (
	cryptoRand "crypto/rand"
	"encoding/binary"
	"fmt"
	"sync"
)
const (
	idSize      = 4                   // 4 bytes for random part
	prefixSize  = 2                   // 2 bytes for client prefix
	rawIDLength = prefixSize + idSize // total 6 bytes
	// NOTE(review): 62^8 ≈ 2.18e14 is less than 2^48 ≈ 2.81e14, so a
	// 6-byte ID can need 9 base62 chars, not 8 — confirm whatever
	// relies on base62Len (not used in this file).
	base62Len = 8 // 6 bytes encoded in base62 ~ 8 chars
	poolSize  = 10000 // capacity of the pre-generated ID pool
)
// Generator produces short IDs made of a fixed 2-byte client prefix
// followed by 4 cryptographically random bytes, base62-encoded. IDs are
// pre-generated into a buffered channel by a background goroutine.
type Generator struct {
	prefix [2]byte     // big-endian client prefix, set once at construction
	idPool chan string // buffered pool of ready-to-use encoded IDs
	// NOTE(review): mu and started are never referenced in this file —
	// confirm they are needed or remove them.
	mu      sync.Mutex
	started bool
}
// NewGenerator initializes the generator with a 2-byte prefix
func NewGenerator(prefix uint16) *Generator {
var b [2]byte
binary.BigEndian.PutUint16(b[:], prefix)
g := &Generator{
prefix: b,
idPool: make(chan string, poolSize),
}
go g.fillPool()
return g
}
// fillPool pre-generates short IDs into the pool
func (g *Generator) fillPool() {
for {
for i := 0; i < poolSize/10; i++ {
id := g.generateRawID()
g.idPool <- EncodeBase62(id)
}
}
}
// generateRawID creates 6 bytes: 2-byte prefix + 4-byte random
func (g *Generator) generateRawID() []byte {
random := make([]byte, idSize)
_, err := cryptoRand.Read(random)
if err != nil {
panic("failed to read random bytes: " + err.Error())
}
raw := make([]byte, rawIDLength)
copy(raw[0:2], g.prefix[:])
copy(raw[2:], random)
return raw
}
// NextID returns the next available short ID from the pool
func (g *Generator) NextID() string {
return <-g.idPool
}
// DecodeID (optional) for analytics/debugging
func DecodeID(shortID string) (prefix uint16, randomPart []byte, err error) {
raw, err := DecodeBase62(shortID)
if err != nil {
return 0, nil, err
}
if len(raw) != rawIDLength {
return 0, nil, err
}
prefix = binary.BigEndian.Uint16(raw[0:2])
randomPart = raw[2:]
return
}