import 'dart:async';
import 'dart:collection';

import 'concurrency_settings.dart';
import 'squadron.dart';
import 'squadron_error.dart';
import 'worker.dart';
import 'perf_counter.dart';
import 'worker_service.dart';
import 'worker_stat.dart';

import 'worker_task.dart';

part '_pool_worker.dart';

/// Worker pool responsible for instantiating, starting and stopping workers
/// running in parallel. A [WorkerPool] is also responsible for creating and
/// assigning [WorkerTask]s to [Worker]s.
class WorkerPool<W extends Worker> implements WorkerService {
  /// Create a worker pool.
  ///
  /// Workers are instantiated using the provided [_workerFactory].
  /// The pool will only instantiate workers as needed, depending on
  /// [concurrencySettings]. The [ConcurrencySettings.minWorkers] and
  /// [ConcurrencySettings.maxWorkers] settings control how many workers will
  /// live in the pool. The [ConcurrencySettings.maxParallel] setting controls
  /// how many tasks can be posted to each individual worker in the pool.
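  ///
  /// A minimal usage sketch; `SampleWorker` below is a hypothetical [Worker]
  /// subclass standing in for an application-specific worker:
  ///
  /// ```dart
  /// final pool = WorkerPool<SampleWorker>(
  ///   () => SampleWorker(),
  ///   concurrencySettings:
  ///       ConcurrencySettings(minWorkers: 1, maxWorkers: 4, maxParallel: 2),
  /// );
  /// await pool.start();
  /// ```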
  WorkerPool(this._workerFactory,
      {ConcurrencySettings? concurrencySettings,
      @Deprecated('use concurrencySettings instead') int minWorkers = 0,
      @Deprecated('use concurrencySettings instead') int maxWorkers = 0,
      @Deprecated('use concurrencySettings instead') int maxParallel = 1})
      : concurrencySettings = concurrencySettings ??
            ConcurrencySettings(
                minWorkers: minWorkers,
                maxWorkers: maxWorkers,
                maxParallel: maxParallel);

  final W Function() _workerFactory;

  /// Concurrency settings.
  final ConcurrencySettings concurrencySettings;

  /// Maximum workers.
  int get maxWorkers => concurrencySettings.maxWorkers;

  /// Maximum tasks per worker.
  int get maxParallel => concurrencySettings.maxParallel;

  /// Maximum running tasks.
  int get maxConcurrency => concurrencySettings.maxConcurrency;

  final _workers = <_PoolWorker<W>>[];

  final List<WorkerStat> _deadWorkerStats = <WorkerStat>[];

  /// Whether the pool is stopped.
  bool get stopped => _stopped;
  bool _stopped = false;

  /// Number of workers.
  int get size => _workers.length;

  /// Maximum number of workers.
  int get maxSize => _maxSize;
  int _maxSize = 0;

  /// Current workload.
  int get workload => stats.fold<int>(0, (p, w) => p + w.workload);

  /// Maximum workload.
  int get maxWorkload => fullStats.fold<int>(
      0, (p, s) => (p >= s.maxWorkload) ? p : s.maxWorkload);

  /// Total workload.
  int get totalWorkload =>
      fullStats.fold<int>(0, (p, s) => p + s.totalWorkload);

  /// Number of errors.
  int get totalErrors => fullStats.fold<int>(0, (p, s) => p + s.totalErrors);

  Future<void> _provisionWorkers(int count) async {
    if (_workers.length < count) {
      final tasks = <Future>[];
      while (_workers.length < count) {
        final poolWorker =
            _PoolWorker(_workerFactory(), concurrencySettings.maxParallel);
        _workers.add(poolWorker);
        tasks.add(poolWorker.worker.start());
      }
      await Future.wait(tasks);
      if (_workers.length > _maxSize) {
        _maxSize = _workers.length;
      }
    }
  }

  /// Ensure at least [ConcurrencySettings.minWorkers] workers are started
  /// (defaulting to 1 if [ConcurrencySettings.minWorkers] is zero).
  Future start() {
    _stopped = false;
    return _provisionWorkers(concurrencySettings.min(0));
  }

  int _removeWorker(_PoolWorker poolWorker, bool force) {
    if (force || _workers.length > concurrencySettings.minWorkers) {
      poolWorker.worker.stop();
      _workers.remove(poolWorker);
      _deadWorkerStats.add(poolWorker.worker.stats);
      return 1;
    } else {
      return 0;
    }
  }

  /// Stop idle pool workers matching the [predicate].
  /// If [predicate] is null or not provided, all workers will be stopped.
  /// Stopping a worker does not interrupt or cancel processing. Workers will
  /// complete pending tasks before shutting down. In the meantime, they will
  /// not receive any new workload.
  /// Returns the number of workers that have been stopped.
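  ///
  /// For instance, a sketch of a hypothetical retirement policy that stops
  /// idle workers which have already processed at least one task:
  ///
  /// ```dart
  /// final stopped = pool.stop((w) => w.stats.totalWorkload > 0);
  /// ```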
  int stop([bool Function(W worker)? predicate]) {
    List<_PoolWorker<W>> targets;
    bool force;
    if (predicate != null) {
      // kill workers that are idle and satisfy the predicate
      targets =
          _workers.where((w) => w.isIdle && predicate(w.worker)).toList();
      force = false;
    } else {
      // kill workers while keeping enough workers alive to process pending tasks
      targets = _workers.skip(_queue.length).toList();
      _stopped = true;
      force = true;
    }
    var stopped = 0;
    for (var poolWorker in targets) {
      stopped += _removeWorker(poolWorker, force);
    }
    return stopped;
  }

  final _queue = Queue<WorkerTask>();
  final _executing = <int, WorkerTask>{};

  /// Gets the remaining workload.
  int get pendingWorkload => _queue.length;

  WorkerTask<T, W> _enqueue<T>(WorkerTask<T, W> task) {
    if (_stopped) {
      throw newSquadronError(
          'the pool cannot accept new requests because it is stopped');
    }
    _queue.addLast(task);
    _schedule();
    return task;
  }

  /// Registers and schedules a [task] that returns a single value.
  @Deprecated('use execute() instead')
  Future<T> compute<T>(Future<T> Function(W worker) task,
          {PerfCounter? counter}) =>
      execute(task, counter: counter);

  /// Registers and schedules a [task] that returns a single value.
  /// Returns a future that completes with the task's value.
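  ///
  /// A usage sketch, assuming a hypothetical `SampleWorker` exposing a
  /// `fibonacci()` method:
  ///
  /// ```dart
  /// final result = await pool.execute((w) => w.fibonacci(30));
  /// ```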
  Future<T> execute<T>(Future<T> Function(W worker) task,
          {PerfCounter? counter}) =>
      scheduleTask(task, counter: counter).value;

  /// Registers and schedules a [task] that returns a stream of values.
  /// Returns a stream containing the task's values.
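  ///
  /// A usage sketch, assuming a hypothetical `SampleWorker` exposing a
  /// `primes()` stream method:
  ///
  /// ```dart
  /// await for (var prime in pool.stream((w) => w.primes(max: 1000))) {
  ///   print(prime);
  /// }
  /// ```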
  Stream<T> stream<T>(Stream<T> Function(W worker) task,
          {PerfCounter? counter}) =>
      scheduleStream(task, counter: counter).stream;

  /// Registers and schedules a [task] that returns a single value.
  /// Returns a [ValueTask]<T>.
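  ///
  /// Unlike [execute], this returns the task handle itself, which can later
  /// be passed to [cancel]; a sketch with the same hypothetical
  /// `SampleWorker`:
  ///
  /// ```dart
  /// final task = pool.scheduleTask((w) => w.fibonacci(40));
  /// // the task may be cancelled, e.g. before a worker picks it up
  /// pool.cancel(task, 'not needed anymore');
  /// ```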
  ValueTask<T> scheduleTask<T>(Future<T> Function(W worker) task,
          {PerfCounter? counter}) =>
      _enqueue<T>(WorkerTask.value(task, counter));

  /// Registers and schedules a [task] that returns a stream of values.
  /// Returns a [StreamTask]<T>.
  StreamTask<T> scheduleStream<T>(Stream<T> Function(W worker) task,
          {PerfCounter? counter}) =>
      _enqueue<T>(WorkerTask.stream(task, counter));

  Timer? _timer;

  /// The scheduler.
  ///
  /// Steps:
  /// 1. Remove stopped workers.
  /// 2. Remove cancelled tasks.
  /// 3. If the task queue is not empty:
  ///    (a) instantiate up to [maxWorkers] workers (if [maxWorkers] is zero,
  ///        instantiate as many workers as there are pending tasks);
  ///    (b) find the maximum capacity available in the pool;
  ///    (c) distribute tasks to workers starting with the workers with the
  ///        highest [_PoolWorker.capacity], as long as [_PoolWorker.capacity]
  ///        is greater than zero.
  void _schedule() {
    if (_timer?.isActive ?? false) {
      // ignore if the last scheduling request has not executed yet
      return;
    }
    _timer = Timer(Duration.zero, () {
      _workers.removeWhere(_PoolWorker.isStopped);
      _queue.removeWhere((t) => t.isCancelled);
      if (_stopped && _queue.isEmpty && _executing.isEmpty) {
        stop();
      } else if (_queue.isNotEmpty) {
        scheduleMicrotask(() {
          _provisionWorkers(concurrencySettings.max(_queue.length)).then((_) {
            // distribute pending tasks across workers, serving the workers
            // with the highest remaining capacity first
            int maxCapacity;
            while (_queue.isNotEmpty &&
                (maxCapacity = _sortAndGetMaxCapacity()) > 0) {
              maxCapacity -= 1;
              for (var idx = 0; idx < _workers.length; idx++) {
                final w = _workers[idx];
                if (_queue.isEmpty ||
                    w.capacity == 0 ||
                    w.capacity < maxCapacity) {
                  break;
                }
                final task = _queue.removeFirst();
                _executing[task.hashCode] = task;
                w.run(task).whenComplete(() {
                  _executing.remove(task.hashCode);
                  _schedule();
                });
              }
            }
          }).catchError((ex) {
            // provisioning failed: cancel all pending tasks
            Squadron.severe('provisioning workers failed with error $ex');
            while (_queue.isNotEmpty) {
              final task = _queue.removeFirst();
              task.cancel('provisioning workers failed');
            }
          });
        });
      }
    });
  }

  int _sortAndGetMaxCapacity() {
    _workers.sort(_PoolWorker.compareCapacityDesc);
    return _workers.first.capacity;
  }

  /// Task cancellation. If a specific [task] is provided, only this task will
  /// be cancelled. Otherwise, all tasks registered with the [WorkerPool] are
  /// cancelled.
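  ///
  /// For example (the task handle comes from [scheduleTask] or
  /// [scheduleStream]):
  ///
  /// ```dart
  /// final task = pool.scheduleTask((w) => w.fibonacci(40));
  /// // later, cancel this specific task...
  /// pool.cancel(task, 'timeout');
  /// // ...or cancel everything
  /// pool.cancel();
  /// ```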
  void cancel([Task? task, String? message]) {
    if (task != null) {
      WorkerTask? workerTask = _executing.remove(task.hashCode);
      if (workerTask == null) {
        _queue.removeWhere((t) {
          if (t == task) {
            workerTask = t;
            return true;
          } else {
            return false;
          }
        });
      }
      workerTask?.cancel(message);
    } else {
      final cancelled = _executing.values.followedBy(_queue).toList();
      _executing.clear();
      _queue.clear();
      for (var task in cancelled) {
        task.cancel(message);
      }
    }
  }

  /// Worker statistics.
  Iterable<WorkerStat> get stats => _workers.map(_PoolWorker.getStats);

  /// Full worker statistics, including workers that have been stopped.
  Iterable<WorkerStat> get fullStats => _deadWorkerStats.followedBy(stats);

  /// Worker pools do not need an [operations] map.
  @override
  final Map<int, CommandHandler> operations = WorkerService.noOperations;
}