Backend Interview Questions - Hard

Hard-level backend interview questions covering distributed systems, advanced architecture, and complex system design.

Q1: Design a distributed caching system (like Redis Cluster).

Answer:

Consistent Hashing

Keys and nodes are hashed onto the same ring; a key is stored on the first node whose hash is greater than or equal to the key's hash (wrapping around the ring). Virtual nodes smooth the distribution, and adding or removing a node only remaps roughly 1/N of the keys.

Implementation

package main

import (
	"fmt"
	"hash/fnv"
	"sort"
	"sync"
)

type ConsistentHash struct {
	mu           sync.RWMutex
	ring         map[uint32]string
	sortedKeys   []uint32
	virtualNodes int
}

func NewConsistentHash(nodes []string, virtualNodes int) *ConsistentHash {
	ch := &ConsistentHash{
		ring:         make(map[uint32]string),
		virtualNodes: virtualNodes,
	}

	for _, node := range nodes {
		ch.AddNode(node)
	}

	return ch
}

func (ch *ConsistentHash) hash(key string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(key))
	return h.Sum32()
}

func (ch *ConsistentHash) AddNode(node string) {
	ch.mu.Lock()
	defer ch.mu.Unlock()

	for i := 0; i < ch.virtualNodes; i++ {
		virtualKey := fmt.Sprintf("%s:%d", node, i)
		hash := ch.hash(virtualKey)
		ch.ring[hash] = node
	}

	ch.updateSortedKeys()
}

func (ch *ConsistentHash) RemoveNode(node string) {
	ch.mu.Lock()
	defer ch.mu.Unlock()

	for i := 0; i < ch.virtualNodes; i++ {
		virtualKey := fmt.Sprintf("%s:%d", node, i)
		hash := ch.hash(virtualKey)
		delete(ch.ring, hash)
	}

	ch.updateSortedKeys()
}

func (ch *ConsistentHash) updateSortedKeys() {
	ch.sortedKeys = make([]uint32, 0, len(ch.ring))
	for k := range ch.ring {
		ch.sortedKeys = append(ch.sortedKeys, k)
	}
	sort.Slice(ch.sortedKeys, func(i, j int) bool {
		return ch.sortedKeys[i] < ch.sortedKeys[j]
	})
}

func (ch *ConsistentHash) GetNode(key string) string {
	ch.mu.RLock()
	defer ch.mu.RUnlock()

	if len(ch.ring) == 0 {
		return ""
	}

	hash := ch.hash(key)

	// Binary search for the first virtual node >= hash
	idx := sort.Search(len(ch.sortedKeys), func(i int) bool {
		return ch.sortedKeys[i] >= hash
	})

	// Wrap around the ring if needed
	if idx == len(ch.sortedKeys) {
		idx = 0
	}

	return ch.ring[ch.sortedKeys[idx]]
}

// Usage
func main() {
	cache := NewConsistentHash([]string{"node1", "node2", "node3"}, 150)

	fmt.Println(cache.GetNode("user:123")) // prints the owning node, e.g. node2
	fmt.Println(cache.GetNode("user:456")) // prints the owning node, e.g. node1

	// Add a new node; only ~1/N of the keys remap
	cache.AddNode("node4")

	// Remove a node; its keys move to the next node on the ring
	cache.RemoveNode("node1")
}

Replication Strategy

class DistributedCache {
  constructor(nodes, replicationFactor = 2) {
    this.hash = new ConsistentHash(nodes);
    this.replicationFactor = replicationFactor;
    this.connections = new Map();

    nodes.forEach(node => {
      this.connections.set(node, createConnection(node));
    });
  }

  async set(key, value, ttl) {
    const nodes = this.getReplicaNodes(key);

    await Promise.all(
      nodes.map(node =>
        this.connections.get(node).set(key, value, ttl)
      )
    );
  }

  async get(key) {
    const nodes = this.getReplicaNodes(key);

    // Try primary first
    try {
      return await this.connections.get(nodes[0]).get(key);
    } catch (error) {
      // Fall back to replicas
      for (let i = 1; i < nodes.length; i++) {
        try {
          return await this.connections.get(nodes[i]).get(key);
        } catch (e) {
          continue;
        }
      }
      throw new Error('All replicas failed');
    }
  }

  getReplicaNodes(key) {
    const primary = this.hash.getNode(key);
    const replicas = [primary];

    // Get next N-1 distinct nodes for replication
    let currentKey = key;
    for (let i = 1; i < this.replicationFactor; i++) {
      currentKey = `${currentKey}:replica${i}`;
      const replica = this.hash.getNode(currentKey);
      if (replica !== primary && !replicas.includes(replica)) {
        replicas.push(replica);
      }
    }

    return replicas;
  }
}
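
The class above assumes a JavaScript port of the consistent-hash ring (exposing getNode) and a createConnection factory. A minimal sketch of that factory, assuming ioredis and host:port node names (both assumptions, not part of the original design):

const Redis = require('ioredis');

// Hypothetical connection factory: wraps a Redis client behind the
// get/set interface used by DistributedCache above.
function createConnection(node) {
  const [host, port] = node.split(':');
  const client = new Redis({ host, port: Number(port) || 6379 });

  return {
    // TTL is in seconds; omit EX when no TTL is given
    set: (key, value, ttl) =>
      ttl ? client.set(key, value, 'EX', ttl) : client.set(key, value),
    get: (key) => client.get(key),
  };
}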

Q2: Implement event sourcing with CQRS pattern.

Answer:

Architecture

Commands mutate an aggregate, which records domain events; the events are appended to an event store and published to subscribers. Projections consume those events to build read models, so the write path (commands) and the read path (queries) stay separate.

Implementation

// Event Store
class EventStore {
  constructor() {
    this.events = new Map(); // streamId -> events[]
    this.subscribers = [];
  }

  async append(streamId, events) {
    if (!this.events.has(streamId)) {
      this.events.set(streamId, []);
    }

    const stream = this.events.get(streamId);

    events.forEach(event => {
      event.streamId = streamId; // needed so projections can attribute the event
      event.version = stream.length + 1;
      event.timestamp = Date.now();
      stream.push(event);

      // Notify subscribers
      this.subscribers.forEach(sub => sub(event));
    });
  }

  async getEvents(streamId, fromVersion = 0) {
    const stream = this.events.get(streamId) || [];
    return stream.filter(e => e.version > fromVersion);
  }

  subscribe(handler) {
    this.subscribers.push(handler);
  }
}

// Aggregate
class BankAccount {
  constructor(id) {
    this.id = id;
    this.balance = 0;
    this.version = 0;
    this.changes = [];
  }

  // Commands
  deposit(amount) {
    if (amount <= 0) {
      throw new Error('Amount must be positive');
    }

    this.applyChange({
      type: 'MoneyDeposited',
      amount
    });
  }

  withdraw(amount) {
    if (amount <= 0) {
      throw new Error('Amount must be positive');
    }

    if (this.balance < amount) {
      throw new Error('Insufficient funds');
    }

    this.applyChange({
      type: 'MoneyWithdrawn',
      amount
    });
  }

  // Apply a new event and record it as uncommitted
  applyChange(event) {
    this.apply(event);
    this.changes.push(event);
  }

  apply(event) {
    switch (event.type) {
      case 'AccountCreated':
        this.balance = 0;
        break;
      case 'MoneyDeposited':
        this.balance += event.amount;
        break;
      case 'MoneyWithdrawn':
        this.balance -= event.amount;
        break;
    }
    this.version++;
  }

  // Rebuild state by replaying history
  loadFromHistory(events) {
    events.forEach(event => this.apply(event));
  }

  getUncommittedChanges() {
    return this.changes;
  }

  markChangesAsCommitted() {
    this.changes = [];
  }
}

// Repository
class BankAccountRepository {
  constructor(eventStore) {
    this.eventStore = eventStore;
  }

  async getById(id) {
    const events = await this.eventStore.getEvents(id);
    const account = new BankAccount(id);
    account.loadFromHistory(events);
    return account;
  }

  async save(account) {
    const changes = account.getUncommittedChanges();
    await this.eventStore.append(account.id, changes);
    account.markChangesAsCommitted();
  }
}

// Read Model (Projection)
class AccountBalanceProjection {
  constructor(eventStore) {
    this.balances = new Map();

    eventStore.subscribe(event => {
      this.handle(event);
    });
  }

  handle(event) {
    const accountId = event.streamId;

    if (!this.balances.has(accountId)) {
      this.balances.set(accountId, 0);
    }

    switch (event.type) {
      case 'MoneyDeposited':
        this.balances.set(
          accountId,
          this.balances.get(accountId) + event.amount
        );
        break;
      case 'MoneyWithdrawn':
        this.balances.set(
          accountId,
          this.balances.get(accountId) - event.amount
        );
        break;
    }
  }

  getBalance(accountId) {
    return this.balances.get(accountId) || 0;
  }
}

// Usage
const eventStore = new EventStore();
const repository = new BankAccountRepository(eventStore);
const projection = new AccountBalanceProjection(eventStore);

// Command side: deposit money
const account = await repository.getById('account-123');
account.deposit(100);
await repository.save(account);

// Query side: read the projected balance
const balance = projection.getBalance('account-123');

Q3: Design a distributed lock service.

Answer:

Redlock Algorithm

Acquire the same lock key on several independent Redis instances; the lock counts as held only if a majority (quorum) grants it within the lock's validity window, and it is released everywhere with an atomic check-and-delete script.

package main

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"sync"
	"time"

	"github.com/redis/go-redis/v9"
)

type RedisLock struct {
	clients  []*redis.Client
	resource string
	token    string
	ttl      time.Duration
	quorum   int
}

func NewRedisLock(clients []*redis.Client, resource string, ttl time.Duration) *RedisLock {
	return &RedisLock{
		clients:  clients,
		resource: resource,
		token:    generateToken(),
		ttl:      ttl,
		quorum:   len(clients)/2 + 1,
	}
}

func generateToken() string {
	b := make([]byte, 16)
	rand.Read(b)
	return hex.EncodeToString(b)
}

func (rl *RedisLock) Acquire(ctx context.Context) (bool, time.Duration) {
	startTime := time.Now()
	locksAcquired := 0

	// Try to acquire the lock on all instances
	var wg sync.WaitGroup
	results := make(chan bool, len(rl.clients))

	for _, client := range rl.clients {
		wg.Add(1)
		go func(c *redis.Client) {
			defer wg.Done()

			result, err := c.SetNX(ctx, rl.resource, rl.token, rl.ttl).Result()
			results <- (err == nil && result)
		}(client)
	}

	wg.Wait()
	close(results)

	for acquired := range results {
		if acquired {
			locksAcquired++
		}
	}

	elapsedTime := time.Since(startTime)
	validityTime := rl.ttl - elapsedTime - 100*time.Millisecond // Drift compensation

	// Check if we got quorum and the lock is still valid
	if locksAcquired >= rl.quorum && validityTime > 0 {
		return true, validityTime
	}

	// Failed to acquire, release any locks we got
	rl.Release(ctx)
	return false, 0
}

func (rl *RedisLock) Release(ctx context.Context) {
	// Lua script for atomic check-and-delete
	script := `
		if redis.call("get", KEYS[1]) == ARGV[1] then
			return redis.call("del", KEYS[1])
		else
			return 0
		end
	`

	var wg sync.WaitGroup
	for _, client := range rl.clients {
		wg.Add(1)
		go func(c *redis.Client) {
			defer wg.Done()
			c.Eval(ctx, script, []string{rl.resource}, rl.token)
		}(client)
	}
	wg.Wait()
}

func (rl *RedisLock) Extend(ctx context.Context, additionalTime time.Duration) bool {
	script := `
		if redis.call("get", KEYS[1]) == ARGV[1] then
			return redis.call("pexpire", KEYS[1], ARGV[2])
		else
			return 0
		end
	`

	extended := 0
	var wg sync.WaitGroup
	results := make(chan bool, len(rl.clients))

	for _, client := range rl.clients {
		wg.Add(1)
		go func(c *redis.Client) {
			defer wg.Done()

			result, err := c.Eval(ctx, script, []string{rl.resource}, rl.token, additionalTime.Milliseconds()).Int()
			results <- (err == nil && result == 1)
		}(client)
	}

	wg.Wait()
	close(results)

	for success := range results {
		if success {
			extended++
		}
	}

	return extended >= rl.quorum
}

// Usage with retry
func WithLock(ctx context.Context, clients []*redis.Client, resource string, fn func() error, options ...LockOption) error {
	opts := &lockOptions{
		ttl:        10 * time.Second,
		retries:    3,
		retryDelay: 100 * time.Millisecond,
	}

	for _, opt := range options {
		opt(opts)
	}

	for i := 0; i < opts.retries; i++ {
		lock := NewRedisLock(clients, resource, opts.ttl)
		acquired, _ := lock.Acquire(ctx)

		if acquired {
			defer lock.Release(ctx)
			return fn()
		}

		// Wait before retrying, with linearly increasing backoff
		time.Sleep(opts.retryDelay * time.Duration(i+1))
	}

	return fmt.Errorf("failed to acquire lock after %d retries", opts.retries)
}

type lockOptions struct {
	ttl        time.Duration
	retries    int
	retryDelay time.Duration
}

type LockOption func(*lockOptions)

// Example usage
func main() {
	clients := []*redis.Client{
		redis.NewClient(&redis.Options{Addr: "redis1:6379"}),
		redis.NewClient(&redis.Options{Addr: "redis2:6379"}),
		redis.NewClient(&redis.Options{Addr: "redis3:6379"}),
	}

	ctx := context.Background()

	err := WithLock(ctx, clients, "resource:123", func() error {
		// Critical section
		fmt.Println("Processing critical section")
		time.Sleep(2 * time.Second)
		return nil
	})

	if err != nil {
		fmt.Println("Error:", err)
	}
}

Q4: Implement saga pattern for distributed transactions.

Answer:

Orchestration-Based Saga

A central orchestrator runs each step and registers a compensation for it; if any step fails, the compensations run in reverse order. The choreography variant further below reacts to events instead of relying on a central coordinator.

class OrderSaga {
  constructor(services) {
    this.orderService = services.order;
    this.paymentService = services.payment;
    this.inventoryService = services.inventory;
    this.shippingService = services.shipping;
  }

  async execute(orderData) {
    const saga = {
      orderId: null,
      paymentId: null,
      reservationId: null,
      shippingId: null,
      compensations: []
    };

    try {
      // Step 1: Create order
      saga.orderId = await this.orderService.create(orderData);
      saga.compensations.push(() => this.orderService.cancel(saga.orderId));

      // Step 2: Process payment
      saga.paymentId = await this.paymentService.charge({
        orderId: saga.orderId,
        amount: orderData.total
      });
      saga.compensations.push(() => this.paymentService.refund(saga.paymentId));

      // Step 3: Reserve inventory
      saga.reservationId = await this.inventoryService.reserve({
        orderId: saga.orderId,
        items: orderData.items
      });
      saga.compensations.push(() =>
        this.inventoryService.release(saga.reservationId)
      );

      // Step 4: Schedule shipping
      saga.shippingId = await this.shippingService.schedule({
        orderId: saga.orderId,
        address: orderData.shippingAddress
      });
      saga.compensations.push(() =>
        this.shippingService.cancel(saga.shippingId)
      );

      // All steps succeeded
      await this.orderService.confirm(saga.orderId);
      return { success: true, orderId: saga.orderId };

    } catch (error) {
      // Compensate in reverse order
      console.error('Saga failed, compensating...', error);

      for (let i = saga.compensations.length - 1; i >= 0; i--) {
        try {
          await saga.compensations[i]();
        } catch (compError) {
          console.error('Compensation failed:', compError);
        }
      }

      return { success: false, error: error.message };
    }
  }
}

// Choreography-Based Saga (Event-Driven)
// Note: orderService, paymentService and inventoryService are assumed to be in scope.
class EventDrivenSaga {
  constructor(eventBus) {
    this.eventBus = eventBus;
    this.setupHandlers();
  }

  setupHandlers() {
    // Order created -> Charge payment
    this.eventBus.on('OrderCreated', async (event) => {
      try {
        const payment = await paymentService.charge({
          orderId: event.orderId,
          amount: event.total
        });

        await this.eventBus.emit('PaymentSucceeded', {
          orderId: event.orderId,
          paymentId: payment.id
        });
      } catch (error) {
        await this.eventBus.emit('PaymentFailed', {
          orderId: event.orderId,
          error: error.message
        });
      }
    });

    // Payment succeeded -> Reserve inventory
    this.eventBus.on('PaymentSucceeded', async (event) => {
      try {
        const reservation = await inventoryService.reserve({
          orderId: event.orderId
        });

        await this.eventBus.emit('InventoryReserved', {
          orderId: event.orderId,
          reservationId: reservation.id
        });
      } catch (error) {
        await this.eventBus.emit('InventoryReservationFailed', {
          orderId: event.orderId,
          paymentId: event.paymentId
        });
      }
    });

    // Inventory reservation failed -> Refund payment and cancel the order
    this.eventBus.on('InventoryReservationFailed', async (event) => {
      await paymentService.refund(event.paymentId);
      await orderService.cancel(event.orderId);
    });

    // Continue with other steps...
  }
}
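
Usage of the orchestrator, assuming service clients that expose the methods called above (hypothetical wiring and values):

const saga = new OrderSaga({
  order: orderService,
  payment: paymentService,
  inventory: inventoryService,
  shipping: shippingService
});

const result = await saga.execute({
  total: 59.98,
  items: [{ sku: 'ABC-1', quantity: 2 }],
  shippingAddress: { city: 'Berlin', postalCode: '10115' }
});

if (!result.success) {
  console.error('Order failed and was compensated:', result.error);
}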

Q5: Design a real-time notification system at scale.

Answer:

Architecture

Clients hold WebSocket connections to one of many notification servers. The servers track presence in Redis and fan out messages over Redis pub/sub, so a notification published by any server reaches the server that owns the user's connection; notifications for offline users are persisted for later delivery.

Implementation

// WebSocket Server
class NotificationServer {
  constructor(redis, messageQueue) {
    this.connections = new Map(); // userId -> WebSocket[]
    this.redis = redis;
    this.messageQueue = messageQueue;
    this.serverId = `server-${process.pid}`;

    this.setupPubSub();
  }

  setupPubSub() {
    // A subscribed Redis connection can't issue other commands, so use a
    // dedicated connection for the subscription (duplicate() is assumed here).
    this.subscriber = this.redis.duplicate();
    this.subscriber.subscribe('notifications');

    this.subscriber.on('message', (channel, message) => {
      const notification = JSON.parse(message);
      this.deliverToLocalConnections(notification);
    });
  }

  async handleConnection(ws, userId) {
    // Store connection
    if (!this.connections.has(userId)) {
      this.connections.set(userId, []);
    }
    this.connections.get(userId).push(ws);

    // Update presence
    await this.redis.sadd(`online:${userId}`, this.serverId);
    await this.redis.expire(`online:${userId}`, 30);

    // Send pending notifications
    await this.sendPendingNotifications(userId, ws);

    // Setup heartbeat and keep presence alive
    const heartbeat = setInterval(() => {
      if (ws.readyState === ws.OPEN) {
        ws.ping();
        this.redis.expire(`online:${userId}`, 30);
      } else {
        clearInterval(heartbeat);
      }
    }, 10000);

    ws.on('close', () => {
      this.handleDisconnection(userId, ws);
      clearInterval(heartbeat);
    });
  }

  async handleDisconnection(userId, ws) {
    const connections = this.connections.get(userId);
    if (connections) {
      const index = connections.indexOf(ws);
      if (index > -1) {
        connections.splice(index, 1);
      }

      if (connections.length === 0) {
        this.connections.delete(userId);
        await this.redis.srem(`online:${userId}`, this.serverId);
      }
    }
  }

  async sendNotification(userId, notification) {
    // Check whether the user is online on any server
    const onlineServers = await this.redis.smembers(`online:${userId}`);

    if (onlineServers.length > 0) {
      // User is online somewhere: publish so the owning server delivers it
      await this.redis.publish('notifications', JSON.stringify({
        userId,
        notification
      }));
    } else {
      // User is offline: store for later delivery
      await this.storeNotification(userId, notification);
    }
  }

  deliverToLocalConnections(data) {
    const { userId, notification } = data;
    const connections = this.connections.get(userId);

    if (connections) {
      connections.forEach(ws => {
        if (ws.readyState === ws.OPEN) {
          ws.send(JSON.stringify(notification));
        }
      });
    }
  }

  async sendPendingNotifications(userId, ws) {
    const notifications = await this.getPendingNotifications(userId);

    notifications.forEach(notification => {
      ws.send(JSON.stringify(notification));
    });

    // Mark as delivered
    await this.markAsDelivered(userId, notifications);
  }

  async storeNotification(userId, notification) {
    await this.messageQueue.add('store-notification', {
      userId,
      notification,
      timestamp: Date.now()
    });
  }

  async getPendingNotifications(userId) {
    // Get from the database (db is an assumed persistence layer)
    return await db.notifications.find({
      userId,
      delivered: false
    }).sort({ timestamp: 1 }).limit(100);
  }

  async markAsDelivered(userId, notifications) {
    const ids = notifications.map(n => n.id);
    await db.notifications.updateMany(
      { id: { $in: ids } },
      { $set: { delivered: true } }
    );
  }
}

// Client
class NotificationClient {
  constructor(url, userId, token) {
    this.url = url;
    this.userId = userId;
    this.token = token;
    this.ws = null;
    this.reconnectAttempts = 0;
    this.maxReconnectAttempts = 5;
  }

  connect() {
    this.ws = new WebSocket(`${this.url}?token=${this.token}`);

    this.ws.on('open', () => {
      console.log('Connected');
      this.reconnectAttempts = 0;
    });

    this.ws.on('message', (data) => {
      const notification = JSON.parse(data);
      this.handleNotification(notification);
    });

    this.ws.on('close', () => {
      console.log('Disconnected');
      this.reconnect();
    });

    this.ws.on('error', (error) => {
      console.error('WebSocket error:', error);
    });
  }

  reconnect() {
    if (this.reconnectAttempts < this.maxReconnectAttempts) {
      this.reconnectAttempts++;
      const delay = Math.min(1000 * Math.pow(2, this.reconnectAttempts), 30000);

      setTimeout(() => {
        console.log(`Reconnecting (attempt ${this.reconnectAttempts})...`);
        this.connect();
      }, delay);
    }
  }

  handleNotification(notification) {
    console.log('Received notification:', notification);
    // Display to the user
  }
}
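
The client uses the .on(...) event API, which matches the Node ws package; in a browser the native WebSocket exists but uses addEventListener instead. A usage sketch under the Node assumption (URL and token are placeholders):

const WebSocket = require('ws'); // provides the WebSocket constructor used in connect()

const client = new NotificationClient('wss://notify.example.com/ws', 'user-123', 'jwt-token-here');
client.connect();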

Q6: Implement database connection pooling with circuit breaker.

Answer:

Implementation

class CircuitBreaker {
  constructor(options = {}) {
    this.failureThreshold = options.failureThreshold || 5;
    this.successThreshold = options.successThreshold || 2;
    this.timeout = options.timeout || 60000;
    this.state = 'CLOSED';
    this.failures = 0;
    this.successes = 0;
    this.nextAttempt = Date.now();
  }

  async execute(fn) {
    if (this.state === 'OPEN') {
      if (Date.now() < this.nextAttempt) {
        throw new Error('Circuit breaker is OPEN');
      }
      this.state = 'HALF_OPEN';
    }

    try {
      const result = await fn();
      this.onSuccess();
      return result;
    } catch (error) {
      this.onFailure();
      throw error;
    }
  }

  onSuccess() {
    this.failures = 0;

    if (this.state === 'HALF_OPEN') {
      this.successes++;

      if (this.successes >= this.successThreshold) {
        this.state = 'CLOSED';
        this.successes = 0;
      }
    }
  }

  onFailure() {
    this.failures++;
    this.successes = 0;

    if (this.failures >= this.failureThreshold) {
      this.state = 'OPEN';
      this.nextAttempt = Date.now() + this.timeout;
    }
  }

  getState() {
    return {
      state: this.state,
      failures: this.failures,
      successes: this.successes
    };
  }
}

class ResilientConnectionPool {
  constructor(config) {
    this.pool = new Pool(config);
    this.circuitBreaker = new CircuitBreaker({
      failureThreshold: 5,
      successThreshold: 2,
      timeout: 60000
    });

    this.setupMonitoring();
  }

  async query(sql, params) {
    return await this.circuitBreaker.execute(async () => {
      const client = await this.pool.connect();

      try {
        const result = await Promise.race([
          client.query(sql, params),
          this.timeout(5000)
        ]);
        return result;
      } finally {
        client.release();
      }
    });
  }

  timeout(ms) {
    return new Promise((_, reject) =>
      setTimeout(() => reject(new Error('Query timeout')), ms)
    );
  }

  setupMonitoring() {
    setInterval(() => {
      const poolStats = {
        total: this.pool.totalCount,
        idle: this.pool.idleCount,
        waiting: this.pool.waitingCount
      };

      const cbStats = this.circuitBreaker.getState();

      console.log('Pool stats:', poolStats);
      console.log('Circuit breaker:', cbStats);

      // Alert if the pool is exhausted
      if (poolStats.idle === 0 && poolStats.waiting > 5) {
        console.error('ALERT: Connection pool exhausted!');
      }

      // Alert if the circuit is open
      if (cbStats.state === 'OPEN') {
        console.error('ALERT: Circuit breaker is OPEN!');
      }
    }, 10000);
  }

  async healthCheck() {
    try {
      await this.query('SELECT 1');
      return { healthy: true };
    } catch (error) {
      return { healthy: false, error: error.message };
    }
  }

  async close() {
    await this.pool.end();
  }
}

// Usage with retry
async function queryWithRetry(pool, sql, params, retries = 3) {
  for (let i = 0; i < retries; i++) {
    try {
      return await pool.query(sql, params);
    } catch (error) {
      if (i === retries - 1) throw error;

      const delay = Math.min(1000 * Math.pow(2, i), 10000);
      await new Promise(resolve => setTimeout(resolve, delay));
    }
  }
}
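
The Pool used by ResilientConnectionPool is assumed to be node-postgres (pg), whose pool exposes the totalCount/idleCount/waitingCount fields read in setupMonitoring. A usage sketch under that assumption (connection settings are placeholders):

const { Pool } = require('pg'); // provides the Pool constructor used above

const pool = new ResilientConnectionPool({
  host: 'localhost',
  database: 'app',
  user: 'app',
  password: process.env.DB_PASSWORD,
  max: 20,                   // upper bound on pooled connections
  idleTimeoutMillis: 30000   // close idle clients after 30s
});

// Retries wrap the circuit breaker: transient failures back off,
// persistent failures eventually trip the breaker open.
const result = await queryWithRetry(pool, 'SELECT * FROM users WHERE id = $1', [42]);
console.log(result.rows);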

Q7: Design a multi-tenant SaaS database architecture.

Answer:

Shared Database, Shared Schema

-- All tenants share the same tables
CREATE TABLE users (
    id UUID PRIMARY KEY,
    tenant_id UUID NOT NULL,
    email VARCHAR(255) NOT NULL,
    name VARCHAR(255),
    UNIQUE(tenant_id, email)
);

CREATE INDEX idx_users_tenant ON users(tenant_id);

-- Row-level security
CREATE POLICY tenant_isolation ON users
    USING (tenant_id = current_setting('app.current_tenant')::UUID);

ALTER TABLE users ENABLE ROW LEVEL SECURITY;

// Middleware to set tenant context
async function tenantMiddleware(req, res, next) {
  const tenantId = req.headers['x-tenant-id'] || req.user?.tenantId;

  if (!tenantId) {
    return res.status(400).json({ error: 'Tenant ID required' });
  }

  req.tenantId = tenantId;

  // SET doesn't accept bind parameters, so use set_config() for the session setting
  await db.query("SELECT set_config('app.current_tenant', $1, false)", [tenantId]);

  next();
}

// Queries are automatically filtered by tenant via the RLS policy
app.get('/api/users', tenantMiddleware, async (req, res) => {
  const { rows } = await db.query('SELECT * FROM users');
  res.json({ users: rows });
});

Separate Schema Per Tenant

class MultiTenantDatabase {
  constructor(pool) {
    this.pool = pool;
    this.schemaCache = new Map();
  }

  async ensureSchema(tenantId) {
    if (this.schemaCache.has(tenantId)) {
      return;
    }

    // Schema names can't be bound as parameters, so sanitize the tenant id
    // before interpolating it to avoid SQL injection.
    const schemaName = `tenant_${String(tenantId).replace(/[^a-zA-Z0-9_]/g, '_')}`;

    await this.pool.query(`
      CREATE SCHEMA IF NOT EXISTS ${schemaName}
    `);

    await this.pool.query(`
      CREATE TABLE IF NOT EXISTS ${schemaName}.users (
        id UUID PRIMARY KEY,
        email VARCHAR(255) UNIQUE,
        name VARCHAR(255)
      )
    `);

    this.schemaCache.set(tenantId, schemaName);
  }

  async query(tenantId, sql, params) {
    await this.ensureSchema(tenantId);
    const schemaName = this.schemaCache.get(tenantId);

    // Set the search path; with a shared pool this should run on the same
    // connection as the query (e.g. by checking out a dedicated client).
    await this.pool.query(`SET search_path TO ${schemaName}`);

    return await this.pool.query(sql, params);
  }
}

Separate Database Per Tenant

class DatabaseRouter {
  constructor() {
    this.connections = new Map();
  }

  async getConnection(tenantId) {
    if (this.connections.has(tenantId)) {
      return this.connections.get(tenantId);
    }

    // Get the tenant's database config
    const config = await this.getTenantConfig(tenantId);

    const pool = new Pool({
      host: config.host,
      port: config.port,
      database: config.database,
      user: config.user,
      password: config.password
    });

    this.connections.set(tenantId, pool);
    return pool;
  }

  async query(tenantId, sql, params) {
    const pool = await this.getConnection(tenantId);
    return await pool.query(sql, params);
  }

  async getTenantConfig(tenantId) {
    // Look up the tenant's connection details in a central registry
    const result = await centralDb.query(
      'SELECT * FROM tenant_databases WHERE tenant_id = $1',
      [tenantId]
    );
    return result.rows[0];
  }
}

Q8: Implement API gateway with rate limiting and authentication.

Answer:

Implementation

// Error types used by the gateway
class UnauthorizedError extends Error {}
class RateLimitError extends Error {}

class APIGateway {
  constructor(config) {
    this.services = config.services;
    this.rateLimiter = new RateLimiter(config.redis);
    this.auth = new AuthService(config.jwtSecret);
    this.cache = new Cache(config.redis);
  }

  async handle(req, res) {
    try {
      // 1. Authentication
      const user = await this.authenticate(req);
      req.user = user;

      // 2. Rate limiting
      await this.checkRateLimit(req);

      // 3. Route to service
      const service = this.findService(req.path);
      if (!service) {
        return res.status(404).json({ error: 'Service not found' });
      }

      // 4. Check cache
      const cacheKey = this.getCacheKey(req);
      const cached = await this.cache.get(cacheKey);
      if (cached) {
        return res.json(cached);
      }

      // 5. Forward request
      const response = await this.forwardRequest(service, req);

      // 6. Cache GET responses for 60 seconds
      if (req.method === 'GET') {
        await this.cache.set(cacheKey, response.data, 60);
      }

      res.status(response.status).json(response.data);

    } catch (error) {
      this.handleError(error, res);
    }
  }

  async authenticate(req) {
    const token = req.headers.authorization?.split(' ')[1];

    if (!token) {
      throw new UnauthorizedError('No token provided');
    }

    return await this.auth.verify(token);
  }

  async checkRateLimit(req) {
    const key = req.user?.id || req.ip;
    const limit = req.user ? 1000 : 100; // Higher limit for authenticated callers

    const allowed = await this.rateLimiter.check(key, limit, 60);

    if (!allowed) {
      throw new RateLimitError('Too many requests');
    }
  }

  findService(path) {
    for (const [pattern, service] of Object.entries(this.services)) {
      if (path.startsWith(pattern)) {
        return service;
      }
    }
    return null;
  }

  async forwardRequest(service, req) {
    // Load balancing: round-robin over the service's instances
    const instance = this.selectInstance(service);

    const response = await fetch(instance + req.path, {
      method: req.method,
      headers: {
        ...req.headers,
        'X-User-Id': req.user?.id,
        'X-Forwarded-For': req.ip
      },
      body: req.method !== 'GET' ? JSON.stringify(req.body) : undefined
    });

    return {
      status: response.status,
      data: await response.json()
    };
  }

  selectInstance(service) {
    // Simple round-robin
    if (!service.currentIndex) {
      service.currentIndex = 0;
    }

    const instance = service.instances[service.currentIndex];
    service.currentIndex = (service.currentIndex + 1) % service.instances.length;

    return instance;
  }

  getCacheKey(req) {
    return `${req.method}:${req.path}:${JSON.stringify(req.query)}`;
  }

  handleError(error, res) {
    if (error instanceof UnauthorizedError) {
      return res.status(401).json({ error: error.message });
    }

    if (error instanceof RateLimitError) {
      return res.status(429).json({ error: error.message });
    }

    console.error('Gateway error:', error);
    res.status(500).json({ error: 'Internal server error' });
  }
}

// Configuration
const gateway = new APIGateway({
  jwtSecret: process.env.JWT_SECRET,
  redis: redisClient,
  services: {
    '/api/users': {
      url: 'http://user-service',
      instances: [
        'http://user-service-1:3000',
        'http://user-service-2:3000',
        'http://user-service-3:3000'
      ]
    },
    '/api/orders': {
      url: 'http://order-service',
      instances: [
        'http://order-service-1:3000',
        'http://order-service-2:3000'
      ]
    }
  }
});
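
The gateway composes helpers (AuthService, Cache, RateLimiter) that aren't shown above. As one example, a minimal fixed-window RateLimiter backed by Redis INCR/EXPIRE could look like this (the key naming is an assumption):

class RateLimiter {
  constructor(redis) {
    this.redis = redis;
  }

  // Returns true while the caller stays under `limit` requests in the current window
  async check(key, limit, windowSeconds) {
    const window = Math.floor(Date.now() / (windowSeconds * 1000));
    const bucket = `ratelimit:${key}:${window}`;

    const count = await this.redis.incr(bucket);
    if (count === 1) {
      // First hit in this window: set the expiry so the bucket cleans itself up
      await this.redis.expire(bucket, windowSeconds);
    }

    return count <= limit;
  }
}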

Q9: Design a job scheduling system with priority queues.

Answer:

Implementation

class JobScheduler {
  constructor(redis) {
    this.redis = redis;
    this.workers = [];
    this.running = false;
  }

  async addJob(job, priority = 'medium') {
    const priorities = { high: 3, medium: 2, low: 1 };
    const score = priorities[priority] || 2;

    const jobData = {
      id: generateId(),
      ...job,
      priority,
      attempts: 0,
      createdAt: Date.now()
    };

    await this.redis.zadd(
      'jobs:pending',
      score,
      JSON.stringify(jobData)
    );

    return jobData.id;
  }

  async start(numWorkers = 5) {
    this.running = true;

    for (let i = 0; i < numWorkers; i++) {
      this.workers.push(this.worker(i));
    }

    await Promise.all(this.workers);
  }

  async worker(id) {
    console.log(`Worker ${id} started`);

    while (this.running) {
      try {
        const job = await this.getNextJob();

        if (!job) {
          await sleep(1000);
          continue;
        }

        await this.processJob(job);

      } catch (error) {
        console.error(`Worker ${id} error:`, error);
      }
    }
  }

  async getNextJob() {
    // Get the highest-priority job (ZPOPMAX)
    const result = await this.redis.zpopmax('jobs:pending');

    if (!result || result.length === 0) {
      return null;
    }

    return JSON.parse(result[0]);
  }

  async processJob(job) {
    try {
      // Move to processing
      await this.redis.hset('jobs:processing', job.id, JSON.stringify(job));

      // Execute job
      await this.executeJob(job);

      // Move to completed
      await this.redis.hdel('jobs:processing', job.id);
      await this.redis.hset('jobs:completed', job.id, JSON.stringify({
        ...job,
        completedAt: Date.now()
      }));

    } catch (error) {
      await this.handleJobFailure(job, error);
    }
  }

  async executeJob(job) {
    switch (job.type) {
      case 'send-email':
        await sendEmail(job.data);
        break;
      case 'process-image':
        await processImage(job.data);
        break;
      case 'generate-report':
        await generateReport(job.data);
        break;
      default:
        throw new Error(`Unknown job type: ${job.type}`);
    }
  }

  async handleJobFailure(job, error) {
    job.attempts++;
    job.lastError = error.message;

    const maxAttempts = 3;

    if (job.attempts < maxAttempts) {
      // Retry with exponential backoff
      const delay = Math.min(1000 * Math.pow(2, job.attempts), 60000);

      setTimeout(async () => {
        await this.addJob(job, job.priority);
      }, delay);

    } else {
      // Move to dead letter queue
      await this.redis.hdel('jobs:processing', job.id);
      await this.redis.hset('jobs:failed', job.id, JSON.stringify({
        ...job,
        failedAt: Date.now()
      }));
    }
  }

  async stop() {
    this.running = false;
    await Promise.all(this.workers);
  }

  async getStats() {
    const pending = await this.redis.zcard('jobs:pending');
    const processing = await this.redis.hlen('jobs:processing');
    const completed = await this.redis.hlen('jobs:completed');
    const failed = await this.redis.hlen('jobs:failed');

    return { pending, processing, completed, failed };
  }
}

// Usage
const scheduler = new JobScheduler(redis);

// Add jobs
await scheduler.addJob({
  type: 'send-email',
  data: { to: 'user@example.com', template: 'welcome' }
}, 'high');

await scheduler.addJob({
  type: 'generate-report',
  data: { reportId: '123' }
}, 'low');

// Start workers
await scheduler.start(10);
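
The scheduler relies on two small helpers that aren't shown above; minimal versions follow (the job handlers such as sendEmail are still assumed to exist elsewhere):

const crypto = require('crypto');

// Unique job id
function generateId() {
  return crypto.randomUUID();
}

// Promise-based delay used by idle workers
function sleep(ms) {
  return new Promise(resolve => setTimeout(resolve, ms));
}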

Q10: Implement distributed tracing and observability.

Answer:

Distributed Tracing

const opentelemetry = require('@opentelemetry/api');
const { NodeTracerProvider } = require('@opentelemetry/sdk-trace-node');
const { SimpleSpanProcessor } = require('@opentelemetry/sdk-trace-base');
const { JaegerExporter } = require('@opentelemetry/exporter-jaeger');

class DistributedTracing {
  constructor() {
    this.provider = new NodeTracerProvider();

    const exporter = new JaegerExporter({
      endpoint: 'http://jaeger:14268/api/traces'
    });

    this.provider.addSpanProcessor(
      new SimpleSpanProcessor(exporter)
    );

    this.provider.register();
    this.tracer = opentelemetry.trace.getTracer('my-service');
  }

  async traceRequest(name, fn, parentContext) {
    // The parent is supplied as a context (third argument) in the OpenTelemetry API
    const span = this.tracer.startSpan(
      name,
      {},
      parentContext || opentelemetry.context.active()
    );

    try {
      const result = await fn(span);
      span.setStatus({ code: opentelemetry.SpanStatusCode.OK });
      return result;
    } catch (error) {
      span.setStatus({
        code: opentelemetry.SpanStatusCode.ERROR,
        message: error.message
      });
      span.recordException(error);
      throw error;
    } finally {
      span.end();
    }
  }
}

// Middleware
function tracingMiddleware(req, res, next) {
  const tracer = opentelemetry.trace.getTracer('api-gateway');

  // Extract the parent context from incoming headers
  const parentContext = opentelemetry.propagation.extract(
    opentelemetry.context.active(),
    req.headers
  );

  const span = tracer.startSpan(`${req.method} ${req.path}`, {
    kind: opentelemetry.SpanKind.SERVER,
    attributes: {
      'http.method': req.method,
      'http.url': req.url,
      'http.target': req.path,
      'http.user_agent': req.headers['user-agent']
    }
  }, parentContext);

  req.span = span;

  res.on('finish', () => {
    span.setAttribute('http.status_code', res.statusCode);
    span.end();
  });

  next();
}

// Service-to-service propagation
async function callService(url, data, parentSpan) {
  const tracer = opentelemetry.trace.getTracer('my-service');
  const parentContext = opentelemetry.trace.setSpan(
    opentelemetry.context.active(),
    parentSpan
  );

  const span = tracer.startSpan('http.request', {
    attributes: {
      'http.url': url,
      'http.method': 'POST'
    }
  }, parentContext);

  // Inject the trace context into outgoing headers
  const headers = {};
  opentelemetry.propagation.inject(
    opentelemetry.trace.setSpan(opentelemetry.context.active(), span),
    headers
  );

  try {
    const response = await fetch(url, {
      method: 'POST',
      headers: {
        ...headers,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(data)
    });

    span.setAttribute('http.status_code', response.status);
    return await response.json();
  } finally {
    span.end();
  }
}
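
A usage sketch tying the pieces together in an Express app (the route and downstream URL are hypothetical):

const tracing = new DistributedTracing();

app.use(tracingMiddleware);

app.get('/api/orders/:id', async (req, res) => {
  // Run downstream work in a child span of the incoming request span
  const parentContext = opentelemetry.trace.setSpan(
    opentelemetry.context.active(),
    req.span
  );

  const order = await tracing.traceRequest('load-order', async () => {
    return await callService('http://order-service/orders', { id: req.params.id }, req.span);
  }, parentContext);

  res.json(order);
});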

Summary

Hard backend topics:

  • Distributed Caching: Consistent hashing, replication, sharding
  • Event Sourcing: CQRS, event store, projections
  • Distributed Locks: Redlock, fault tolerance
  • Saga Pattern: Orchestration vs choreography, compensations
  • Real-time Notifications: WebSocket, pub/sub, presence
  • Circuit Breaker: Connection pooling, resilience
  • Multi-Tenancy: Database strategies, isolation
  • API Gateway: Rate limiting, authentication, routing
  • Job Scheduling: Priority queues, retry logic
  • Distributed Tracing: OpenTelemetry, observability

These advanced concepts enable building production-grade distributed systems.
