LCOV - code coverage report

Current view
top level - /src - worker_pool.dart
Test
lcov.info
Date
2022-04-02
Legend
Lines
hit
not hit
Branches
taken
not taken
# not executed
HitTotalCoverage
Lines11312590.4%
Functions00-
Branches00-
Each row represents a line of source code
LineBranchHitsSource code
1import 'dart:async';
2import 'dart:collection';
3
4import 'concurrency_settings.dart';
5import 'squadron.dart';
6import 'squadron_error.dart';
7import 'worker.dart';
8import 'perf_counter.dart';
9import 'worker_service.dart';
10import 'worker_stat.dart';
11
12import 'worker_task.dart';
13
14part '_pool_worker.dart';
15
16/// Worker pool responsible for instantiating, starting and stopping workers running in parallel.
17/// A [WorkerPool] is also responsible for creating and assigning [WorkerTask]s to [Worker]s.
18class WorkerPool<W extends Worker> implements WorkerService {
19 /// Create a worker pool.
20 ///
21 /// Workers are instantiated using the provided [_workerFactory].
22 /// The pool will only instantiate workers as needed, depending on [concurrencySettings].
23 /// The [ConcurrencySettings.minWorkers] and [ConcurrencySettings.maxWorkers] settings control
24 /// how many workers will live in the pool. The [ConcurrencySettings.maxParallel] setting
25 /// controls how many tasks can be posted to each individual worker in the pool.
261 WorkerPool(this._workerFactory,
27 {ConcurrencySettings? concurrencySettings,
28 @Deprecated('use concurrencySettings instead') int minWorkers = 0,
29 @Deprecated('use concurrencySettings instead') int maxWorkers = 0,
30 @Deprecated('use concurrencySettings instead') int maxParallel = 1})
31 : concurrencySettings = concurrencySettings ??
320 ConcurrencySettings(
33 minWorkers: minWorkers,
34 maxWorkers: maxWorkers,
35 maxParallel: maxParallel);
36
37 final W Function() _workerFactory;
38
39 /// Concurrency settings.
40 final ConcurrencySettings concurrencySettings;
41
42 /// Maximum workers.
433 int get maxWorkers => concurrencySettings.maxWorkers;
44
45 /// Maximum tasks per worker.
460 int get maxParallel => concurrencySettings.maxParallel;
47
48 /// Maximum running tasks.
493 int get maxConcurrency => concurrencySettings.maxConcurrency;
50
511 final _workers = <_PoolWorker<W>>[];
52
531 final List<WorkerStat> _deadWorkerStats = <WorkerStat>[];
54
55 /// Whether the pool has been stopped.
562 bool get stopped => _stopped;
57 bool _stopped = false;
58
59 /// Number of workers.
603 int get size => _workers.length;
61
62 /// Maximum number of workers the pool has reached (high-water mark).
630 int get maxSize => _maxSize;
64 int _maxSize = 0;
65
66 /// Current workload.
670 int get workload => stats.fold<int>(0, (p, w) => p + w.workload);
68
69 /// Maximum workload.
700 int get maxWorkload => fullStats.fold<int>(
710 0, (p, s) => (p >= s.maxWorkload) ? p : s.maxWorkload);
72
73 /// Total workload.
740 int get totalWorkload =>
750 fullStats.fold<int>(0, (p, s) => p + s.totalWorkload);
76
77 /// Number of errors.
780 int get totalErrors => fullStats.fold<int>(0, (p, s) => p + s.totalErrors);
79
802 Future<void> _provisionWorkers(int count) async {
814 if (_workers.length < count) {
822 final tasks = <Future>[];
833 while (_workers.length < count) {
84 final poolWorker =
856 _PoolWorker(_workerFactory(), concurrencySettings.maxParallel);
863 _workers.add(poolWorker);
874 tasks.add(poolWorker.worker.start());
88 }
893 await Future.wait(tasks);
904 if (_workers.length > _maxSize) {
913 _maxSize = _workers.length;
92 }
93 }
941 }
95
96 /// Ensure at least [ConcurrencySettings.minWorkers] workers are started
97 /// (defaulting to 1 if [ConcurrencySettings.minWorkers] is zero).
982 Future start() {
992 _stopped = false;
1004 return _provisionWorkers(concurrencySettings.min(0));
1011 }
102
1032 int _removeWorker(_PoolWorker poolWorker, bool force) {
1046 if (force || _workers.length > concurrencySettings.minWorkers) {
1053 poolWorker.worker.stop();
1063 _workers.remove(poolWorker);
1075 _deadWorkerStats.add(poolWorker.worker.stats);
1081 return 1;
109 } else {
1101 return 0;
111 }
1121 }
113
114 /// Stop idle pool workers matching the [predicate].
115 /// If [predicate] is null or not provided, all workers will be stopped.
116 /// Stopping a worker does not interrupt or cancel processing. Workers will
117 /// complete pending tasks before shutting down. In the meantime, they will
118 /// not receive any new workload.
119 /// Returns the number of workers that have been stopped.
1202 int stop([bool Function(W worker)? predicate]) {
1211 List<_PoolWorker<W>> targets;
122 bool force;
1231 if (predicate != null) {
124 // kill workers that are idle and satisfy the predicate
1258 targets = _workers.where((w) => w.isIdle && predicate(w.worker)).toList();
1261 force = false;
127 } else {
128 // kill workers while keeping enough workers alive to process pending tasks
1296 targets = _workers.skip(_queue.length).toList();
1302 _stopped = true;
131 force = true;
132 }
133 var stopped = 0;
1343 for (var poolWorker in targets) {
1353 stopped += _removeWorker(poolWorker, force);
136 }
1371 return stopped;
1381 }
139
1401 final _queue = Queue<WorkerTask>();
1411 final _executing = <int, WorkerTask>{};
142
143 /// Gets remaining workload
1444 int get pendingWorkload => _queue.length;
145
1462 WorkerTask<T, W> _enqueue<T>(WorkerTask<T, W> task) {
1472 if (_stopped) {
1482 throw newSquadronError(
149 'the pool cannot accept new requests because it is stopped');
150 }
1513 _queue.addLast(task);
1522 _schedule();
1531 return task;
1541 }
155
156 /// Registers and schedules a [task] that returns a single value.
1570 @Deprecated('use execute() instead')
158 Future<T> compute<T>(Future<T> Function(W worker) task,
159 {PerfCounter? counter}) =>
1600 execute(task, counter: counter);
161
162 /// Registers and schedules a [task] that returns a single value.
163 /// Returns a future that completes with the task's value.
1642 Future<T> execute<T>(Future<T> Function(W worker) task,
165 {PerfCounter? counter}) =>
1663 scheduleTask(task, counter: counter).value;
167
168 /// Registers and schedules a [task] that returns a stream of values.
169 /// Returns a stream containing the task's values.
1702 Stream<T> stream<T>(Stream<T> Function(W worker) task,
171 {PerfCounter? counter}) =>
1723 scheduleStream(task, counter: counter).stream;
173
174 /// Registers and schedules a [task] that returns a single value.
175 /// Returns a [ValueTask]<T>.
1762 ValueTask<T> scheduleTask<T>(Future<T> Function(W worker) task,
177 {PerfCounter? counter}) =>
1783 _enqueue<T>(WorkerTask.value(task, counter));
179
180 /// Registers and schedules a [task] that returns a stream of values.
181 /// Returns a [StreamTask]<T>.
1822 StreamTask<T> scheduleStream<T>(Stream<T> Function(W worker) task,
183 {PerfCounter? counter}) =>
1843 _enqueue<T>(WorkerTask.stream(task, counter));
185
186 Timer? _timer;
187
188 /// The scheduler.
189 ///
190 /// Steps:
191 /// 1. remove stopped workers.
192 /// 2. remove cancelled tasks.
193 /// 3. if the task queue is not empty:
194 (a) instantiate up to [maxWorkers] workers (if [maxWorkers] is zero, instantiate as many workers as there are pending tasks).
195 /// (b) find max capacity available in the pool
196 /// (c) distribute tasks to workers starting with workers with highest [PoolWorker.capacity], as long as [PoolWorker.capacity] > 0.
1972 void _schedule() {
1983 if (_timer?.isActive ?? false) {
199 // ignore if the last scheduling request has not executed yet
2001 return;
201 }
2024 _timer = Timer(Duration.zero, () {
2033 _workers.removeWhere(_PoolWorker.isStopped);
2045 _queue.removeWhere((t) => t.isCancelled);
2056 if (_stopped && _queue.isEmpty && _executing.isEmpty) {
2062 stop();
2072 } else if (_queue.isNotEmpty) {
2083 scheduleMicrotask(() {
2098 _provisionWorkers(concurrencySettings.max(_queue.length)).then((_) {
210 int maxCapacity;
2115 while (_queue.isNotEmpty && (maxCapacity = _sortAndGetMaxCapacity()) > 0) {
2122 maxCapacity -= 1;
2135 for (var idx = 0; idx < _workers.length; idx++) {
2143 final w = _workers[idx];
2157 if (_queue.isEmpty || w.capacity == 0 || w.capacity < maxCapacity) {
2161 break;
217 }
2183 final task = _queue.removeFirst();
2194 _executing[task.hashCode] = task;
2204 w.run(task).whenComplete(() {
2214 _executing.remove(task.hashCode);
2222 _schedule();
2231 });
224 }
225 }
2263 }).catchError((ex) {
2273 Squadron.severe('provisionning workers failed with error $ex');
2283 while (_queue.isNotEmpty) {
2293 final task = _queue.removeFirst();
2302 task.cancel('provisionning workers failed');
231 }
2321 });
2331 });
234 }
2351 });
2361 }
237
2381 int _sortAndGetMaxCapacity() {
2393 _workers.sort(_PoolWorker.compareCapacityDesc);
2404 return _workers.first.capacity;
241 }
242
243 /// Task cancellation. If a specific [task] is provided, only this task will be cancelled.
244 /// Otherwise, all tasks registered with the [WorkerPool] are cancelled.
2452 void cancel([Task? task, String? message]) {
2461 if (task != null) {
2474 WorkerTask? workerTask = _executing.remove(task.hashCode);
2481 if (workerTask == null) {
2494 _queue.removeWhere((t) {
2502 if (t == task) {
2511 workerTask = t;
2521 return true;
253 } else {
2541 return false;
255 }
2561 });
257 }
2582 workerTask?.cancel(message);
259 } else {
2606 final cancelled = _executing.values.followedBy(_queue).toList();
2613 _executing.clear();
2623 _queue.clear();
2633 for (var task in cancelled) {
2642 task.cancel(message);
265 }
266 }
2671 }
268
269 /// Worker statistics.
2704 Iterable<WorkerStat> get stats => _workers.map(_PoolWorker.getStats);
271
272 /// Full worker statistics.
2730 Iterable<WorkerStat> get fullStats => _deadWorkerStats.followedBy(stats);
274
275 /// Worker pools do not need an [operations] map.
276 @override
2771 final Map<int, CommandHandler> operations = WorkerService.noOperations;
278}
Choose Features