Mirror of https://github.com/router-for-me/CLIProxyAPI.git, synced 2026-02-03 04:50:52 +08:00
refactor(usage): replace channel-based queue with mutex-protected slice
- Switched to a slice-based queue with a mutex and condition variable for better control over queuing and dispatching.
- Removed the fixed buffer size to handle dynamic queuing.
- Enhanced shutdown logic to safely close the queue and wake up waiting goroutines.
@@ -42,7 +42,11 @@ type Manager struct {
 	once     sync.Once
 	stopOnce sync.Once
 	cancel   context.CancelFunc
-	queue    chan queueItem
+
+	mu     sync.Mutex
+	cond   *sync.Cond
+	queue  []queueItem
+	closed bool
 
 	pluginsMu sync.RWMutex
 	plugins   []Plugin
@@ -50,10 +54,9 @@ type Manager struct {
 
 // NewManager constructs a manager with a buffered queue.
 func NewManager(buffer int) *Manager {
-	if buffer <= 0 {
-		buffer = 256
-	}
-	return &Manager{queue: make(chan queueItem, buffer)}
+	m := &Manager{}
+	m.cond = sync.NewCond(&m.mu)
+	return m
 }
 
 // Start launches the background dispatcher. Calling Start multiple times is safe.
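The one subtlety in the new constructor is that the condition variable must be created over the same mutex that guards the slice; `sync.NewCond(&m.mu)` does exactly that, so a later `cond.Wait` atomically releases and reacquires that lock. Note also that the `buffer` parameter and the "buffered queue" doc comment survive as leftovers even though nothing uses them anymore. A minimal sketch of the constructor idiom outside this codebase — the `jobQueue` type and its names are illustrative, not the project's:

package usagequeue

import "sync"

// jobQueue is a hypothetical example type: the cond must wrap the
// same mutex that protects items and closed.
type jobQueue struct {
	mu     sync.Mutex
	cond   *sync.Cond
	items  []string
	closed bool
}

func newJobQueue() *jobQueue {
	q := &jobQueue{}
	q.cond = sync.NewCond(&q.mu) // bind the cond to the guarding mutex
	return q
}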
@@ -80,7 +83,10 @@ func (m *Manager) Stop() {
 		if m.cancel != nil {
 			m.cancel()
 		}
-		close(m.queue)
+		m.mu.Lock()
+		m.closed = true
+		m.mu.Unlock()
+		m.cond.Broadcast()
 	})
 }
 
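Note the shutdown ordering in this hunk: the `closed` flag is flipped while holding the lock, and only then is `Broadcast` called, so every goroutine parked in `cond.Wait` wakes, re-checks its predicate, and observes the flag; `Signal` would wake at most one waiter. A compressed sketch of the idiom, continuing the hypothetical `jobQueue` from the previous note:

// close marks the queue as closed and wakes all waiters; subsequent
// publishes become no-ops and the worker drains the backlog, then exits.
func (q *jobQueue) close() {
	q.mu.Lock()
	q.closed = true
	q.mu.Unlock()
	q.cond.Broadcast() // wake every waiter, not just one
}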
@@ -102,40 +108,30 @@ func (m *Manager) Publish(ctx context.Context, record Record) {
 	}
 	// ensure worker is running even if Start was not called explicitly
 	m.Start(context.Background())
-	select {
-	case m.queue <- queueItem{ctx: ctx, record: record}:
-	default:
-		// queue is full; drop the record to avoid blocking runtime paths
-		log.Debugf("usage: queue full, dropping record for provider %s", record.Provider)
+	m.mu.Lock()
+	if m.closed {
+		m.mu.Unlock()
+		return
 	}
+	m.queue = append(m.queue, queueItem{ctx: ctx, record: record})
+	m.mu.Unlock()
+	m.cond.Signal()
 }
 
 func (m *Manager) run(ctx context.Context) {
 	for {
-		select {
-		case <-ctx.Done():
-			m.drain()
-			return
-		case item, ok := <-m.queue:
-			if !ok {
-				return
-			}
-			m.dispatch(item)
+		m.mu.Lock()
+		for !m.closed && len(m.queue) == 0 {
+			m.cond.Wait()
 		}
-	}
-}
-
-func (m *Manager) drain() {
-	for {
-		select {
-		case item, ok := <-m.queue:
-			if !ok {
-				return
-			}
-			m.dispatch(item)
-		default:
+		if len(m.queue) == 0 && m.closed {
+			m.mu.Unlock()
 			return
 		}
+		item := m.queue[0]
+		m.queue = m.queue[1:]
+		m.mu.Unlock()
+		m.dispatch(item)
 	}
 }
 
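Putting the pieces together, here is a self-contained sketch of the pattern the commit lands on: an unbounded publish guarded by a closed check, and a single worker that waits while the queue is open and empty, drains any backlog after close, and dispatches outside the lock. Everything here (`jobQueue`, `publish`, `run`, the `string` payload) is illustrative, not the project's actual API:

package main

import (
	"fmt"
	"sync"
)

type jobQueue struct {
	mu     sync.Mutex
	cond   *sync.Cond
	items  []string
	closed bool
}

func newJobQueue() *jobQueue {
	q := &jobQueue{}
	q.cond = sync.NewCond(&q.mu)
	return q
}

// publish mirrors the new Publish: append unless closed, then signal.
func (q *jobQueue) publish(item string) {
	q.mu.Lock()
	if q.closed {
		q.mu.Unlock()
		return
	}
	q.items = append(q.items, item)
	q.mu.Unlock()
	q.cond.Signal()
}

// close mirrors the new Stop: flip the flag, wake every waiter.
func (q *jobQueue) close() {
	q.mu.Lock()
	q.closed = true
	q.mu.Unlock()
	q.cond.Broadcast()
}

// run mirrors the new dispatcher loop: park while open and empty,
// exit once closed and drained, and dispatch outside the lock.
func (q *jobQueue) run(done chan<- struct{}) {
	defer close(done)
	for {
		q.mu.Lock()
		for !q.closed && len(q.items) == 0 {
			q.cond.Wait() // releases q.mu while parked
		}
		if len(q.items) == 0 && q.closed {
			q.mu.Unlock()
			return
		}
		item := q.items[0]
		q.items = q.items[1:]
		q.mu.Unlock()
		fmt.Println("dispatched:", item) // stand-in for dispatch(item)
	}
}

func main() {
	q := newJobQueue()
	done := make(chan struct{})
	go q.run(done)
	q.publish("record-1")
	q.publish("record-2")
	q.close()
	<-done // records published before close are still drained
}

One trade-off worth noting against the old channel version: the slice queue never drops records under load, but it is also unbounded, so backpressure now rests entirely on how fast the dispatcher drains it.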