
sync Package — Concurrency Primitives

If channels represent "sharing through communication," the sync package provides traditional mutex-based synchronization. Both approaches are complementary and should be used based on the situation.

sync.Mutex — Mutual Exclusion

When multiple goroutines modify the same variable concurrently, a data race occurs.

package main

import (
"fmt"
"sync"
)

// ❌ Data race example
// UnsafeCounter has no synchronization at all: concurrent calls to
// Increment race on the count field and the final value is undefined.
type UnsafeCounter struct {
	count int
}

// Increment bumps the counter. c.count++ is a load, an add, and a
// store — three steps, so it is NOT atomic under concurrency.
func (c *UnsafeCounter) Increment() {
	c.count++
}

// ✅ Protected with Mutex
// SafeCounter guards its count with a sync.Mutex, making Increment
// and Value safe to call from any number of goroutines.
type SafeCounter struct {
	mu    sync.Mutex
	count int
}

// Increment adds one to the counter while holding the lock.
func (c *SafeCounter) Increment() {
	c.mu.Lock()
	// defer guarantees the unlock even if the body grows or panics.
	defer c.mu.Unlock()
	c.count++
}

// Value returns the current count; reads need the lock too,
// otherwise they would race with concurrent Increments.
func (c *SafeCounter) Value() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.count
}

func main() {
	var wg sync.WaitGroup
	counter := &SafeCounter{}

	// Launch 1000 goroutines that each bump the counter exactly once.
	for i := 0; i < 1000; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			counter.Increment()
		}()
	}

	// Block until every goroutine has called Done.
	wg.Wait()
	fmt.Println("Final count:", counter.Value()) // Always 1000
}

Mutex usage rules:

  • After Lock(), always call Unlock() — use defer
  • Go mutexes are not reentrant: calling Lock() on a Mutex the same goroutine already holds deadlocks
  • Never copy a Mutex — always pass as pointer

sync.RWMutex — Separate Read/Write Locking

When reads are frequent and writes are rare, RWMutex improves performance.

package main

import (
"fmt"
"sync"
"time"
)

// Cache is a string-keyed store guarded by an RWMutex so that
// many readers can proceed in parallel while writers get
// exclusive access.
type Cache struct {
	mu   sync.RWMutex
	data map[string]string
}

// NewCache returns an empty, ready-to-use Cache.
// (The map must be allocated up front: writing to a nil map panics.)
func NewCache() *Cache {
	c := &Cache{}
	c.data = make(map[string]string)
	return c
}

// Get looks up key. RLock is shared, so any number of readers
// may be inside Get at the same time.
func (c *Cache) Get(key string) (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.data[key]
	return v, ok
}

// Set stores value under key. Lock is exclusive: it waits for all
// readers and writers to leave before entering.
func (c *Cache) Set(key, value string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.data[key] = value
}

func main() {
	var wg sync.WaitGroup
	cache := NewCache()

	// Seed the cache before any concurrent access begins.
	cache.Set("name", "Alice")
	cache.Set("language", "Go")

	// Five readers run concurrently; RLock lets them overlap.
	for r := 0; r < 5; r++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			val, ok := cache.Get("name")
			if ok {
				fmt.Printf("Reader %d: %s\n", id, val)
			}
		}(r)
	}

	// One writer updates the entry under the exclusive lock.
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(10 * time.Millisecond)
		cache.Set("name", "Bob")
		fmt.Println("Name updated")
	}()

	wg.Wait()
}
| Lock type  | Methods             | Concurrent access                   |
|------------|---------------------|-------------------------------------|
| Write lock | Lock() / Unlock()   | Exclusive (blocks reads and writes) |
| Read lock  | RLock() / RUnlock() | Shared (other reads allowed)        |

sync.Once — Execute Only Once

For initialization code that must run exactly once.

package main

import (
"fmt"
"sync"
)

// Singleton is the type whose single shared instance we hand out.
type Singleton struct {
	value string
}

var (
	instance *Singleton // the one shared instance, set inside once.Do
	once     sync.Once  // guards the one-time initialization
)

// getInstance lazily builds the singleton. once.Do guarantees the
// init closure runs exactly once, even when many goroutines call
// getInstance concurrently; later callers block until it finishes.
func getInstance() *Singleton {
	once.Do(func() {
		fmt.Println("Initializing (only once!)")
		s := &Singleton{value: "the only instance"}
		instance = s
	})
	return instance
}

func main() {
	var wg sync.WaitGroup

	// Five goroutines race to initialize; sync.Once ensures the
	// init message and allocation happen exactly once.
	for g := 0; g < 5; g++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			fmt.Printf("Goroutine %d: %s\n", id, getInstance().value)
		}(g)
	}

	wg.Wait()
}
// "Initializing (only once!)" prints exactly once

sync.WaitGroup — Advanced Usage

Advanced patterns for WaitGroup.

package main

import (
"fmt"
"sync"
"time"
)

// Worker pool pattern
// workerPool starts workerCount goroutines that square every job
// read from jobs, and arranges for results to be closed once all
// workers have drained the (closed) jobs channel.
func workerPool(jobs <-chan int, results chan<- int, workerCount int) {
	var wg sync.WaitGroup

	wg.Add(workerCount)
	for id := 0; id < workerCount; id++ {
		go func(id int) {
			defer wg.Done()
			// range ends when jobs is closed and empty.
			for job := range jobs {
				time.Sleep(100 * time.Millisecond) // simulate work
				sq := job * job
				fmt.Printf("Worker %d: %d → %d\n", id, job, sq)
				results <- sq
			}
		}(id)
	}

	// Closing results lets the consumer's range loop terminate;
	// only done after every worker has finished sending.
	go func() {
		wg.Wait()
		close(results)
	}()
}

func main() {
	jobs := make(chan int, 10)
	results := make(chan int, 10)

	// Spin up three workers draining the jobs channel.
	workerPool(jobs, results, 3)

	// Feed nine jobs, then close so the workers' range loops end.
	for job := 1; job <= 9; job++ {
		jobs <- job
	}
	close(jobs)

	// Sum the squares; this loop ends when workerPool closes results.
	total := 0
	for r := range results {
		total += r
	}
	fmt.Println("Sum:", total) // 1+4+9+...+81 = 285
}

sync.Cond — Condition Variable

Use when you want to wait until a specific condition is met.

package main

import (
"fmt"
"sync"
"time"
)

// Queue is an unbounded FIFO of ints. Pop blocks on a condition
// variable while the queue is empty instead of spinning.
type Queue struct {
	mu    sync.Mutex
	cond  *sync.Cond
	items []int
}

// NewQueue returns a Queue whose condition variable shares the
// queue's own mutex, as sync.NewCond requires a Locker.
func NewQueue() *Queue {
	q := new(Queue)
	q.cond = sync.NewCond(&q.mu)
	return q
}

// Push appends item and wakes one goroutine blocked in Pop.
func (q *Queue) Push(item int) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items = append(q.items, item)
	q.cond.Signal()
}

// Pop removes and returns the head item, blocking while the queue
// is empty. Wait atomically releases the mutex and re-acquires it
// on wakeup, so the emptiness check must be a loop — a signal is
// a hint, not a guarantee that an item is still there.
func (q *Queue) Pop() int {
	q.mu.Lock()
	defer q.mu.Unlock()
	for len(q.items) == 0 {
		q.cond.Wait()
	}
	head := q.items[0]
	q.items = q.items[1:]
	return head
}

func main() {
	q := NewQueue()

	// Consumer: loops forever, blocking inside Pop until items arrive.
	go func() {
		for {
			fmt.Println("Consumed:", q.Pop())
		}
	}()

	// Producer: pushes five items, one every 200ms.
	for i := 1; i <= 5; i++ {
		time.Sleep(200 * time.Millisecond)
		q.Push(i)
		fmt.Println("Produced:", i)
	}

	// Give the consumer time to drain before main exits.
	time.Sleep(500 * time.Millisecond)
}

sync.Map — Concurrency-Safe Map

The basic Go map is not safe for concurrent writes. sync.Map provides a concurrency-safe map.

package main

import (
"fmt"
"sync"
)

func main() {
	var (
		sm sync.Map
		wg sync.WaitGroup
	)

	// Concurrent writes: Store needs no extra locking.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			sm.Store(fmt.Sprintf("key%d", id), id*id)
		}(i)
	}

	wg.Wait()

	// Read back a single key.
	if val, ok := sm.Load("key5"); ok {
		fmt.Println("key5:", val)
	}

	// LoadOrStore returns the existing value when the key is present.
	actual, loaded := sm.LoadOrStore("key5", 999)
	fmt.Printf("key5: %v (existed: %v)\n", actual, loaded)

	// Walk every entry; returning false from the callback stops the walk.
	sm.Range(func(key, value any) bool {
		fmt.Printf("%s = %v\n", key, value)
		return true
	})

	// Remove one entry.
	sm.Delete("key3")
}

When sync.Map is appropriate:

  • Caches with rare writes and many reads
  • When different goroutines access different keys

When regular map + Mutex is better:

  • When the map has many writes
  • Structurally simple concurrent access

Key Takeaways

| Type           | Use case                                 |
|----------------|------------------------------------------|
| sync.Mutex     | Simple mutual exclusion                  |
| sync.RWMutex   | Better performance when reads dominate   |
| sync.Once      | Run initialization code exactly once     |
| sync.WaitGroup | Wait for multiple goroutines to complete |
| sync.Cond      | Condition-based wait/notify              |
| sync.Map       | Concurrency-safe map                     |
  • Mutex vs Channel: protect shared state → Mutex, pass data → channel
  • defer Unlock() pattern — always unlock with defer after Lock
  • Never copy sync types — always pass them as pointers