getBatched<_Model extends _RepositoryModel> method
Future<List<_Model>> getBatched<_Model extends _RepositoryModel>({
  int batchSize = 50,
  OfflineFirstGetPolicy policy = OfflineFirstGetPolicy.awaitRemoteWhenNoneExist,
  Query? query,
  bool seedOnly = false,
})
Get all results in a series of batches of batchSize (defaults to 50).
Useful for large queries or remote result sets.
batchSize maps to the query's limit, and the query's pagination offset is incremented
in query.providerArgs['offset']. The endpoint for _Model should expect these
arguments. The method recurses until a batch returns fewer results than batchSize.
seedOnly skips loading data from SQLite after inserting records. Association queries
can be expensive for large datasets, making deserialization a significant cost when the result
is ignorable (e.g. when eager loading). Defaults to false.
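For example, a call such as the following fetches every matching record in pages of 20. This is a minimal usage sketch; the repository instance and the Customer model are assumptions for illustration, not part of this API.

// Hypothetical repository and model, shown only to illustrate the call shape.
final customers = await repository.getBatched<Customer>(
  batchSize: 20,
  query: Query.where('lastName', 'Mustermann'),
);
print('Fetched ${customers.length} customers in batches of 20');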
Implementation
Future<List<_Model>> getBatched<_Model extends _RepositoryModel>({
  int batchSize = 50,
  OfflineFirstGetPolicy policy = OfflineFirstGetPolicy.awaitRemoteWhenNoneExist,
  Query? query,
  bool seedOnly = false,
}) async {
  final withPolicy = applyPolicyToQuery(query, get: policy);
  query = withPolicy ?? Query();
  final queryWithLimit = query.copyWith(
    providerArgs: {...query.providerArgs, 'limit': batchSize},
  );
  final total = <_Model>[];

  /// Retrieve up to [batchSize] starting at [offset]. Recursively retrieves the next
  /// [batchSize] until no more results are retrieved.
  Future<List<_Model>> getFrom(int offset) async {
    // add offset to the existing query
    final recursiveQuery = queryWithLimit.copyWith(
      providerArgs: {...queryWithLimit.providerArgs, 'offset': offset},
    );

    final results = await get<_Model>(
      query: recursiveQuery,
      policy: policy,
      seedOnly: seedOnly,
    );
    total.addAll(results);

    // if results match the batchSize, increase offset and get again
    if (results.length == batchSize) {
      return await getFrom(offset + batchSize);
    }

    return total;
  }

  return await getFrom(0);
}
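Because the pagination arguments are plain limit/offset values, the recursion above can be followed without any Brick types. The following self-contained sketch mirrors the same pattern against an in-memory list; fetchPage is a hypothetical stand-in for the repository's get call and is purely illustrative.

// Illustrative only: mirrors the batching recursion without Brick types.
Future<List<int>> fetchPage(List<int> source, int limit, int offset) async =>
    source.skip(offset).take(limit).toList();

Future<List<int>> getAllBatched(List<int> source, {int batchSize = 50}) async {
  final total = <int>[];
  Future<List<int>> getFrom(int offset) async {
    final results = await fetchPage(source, batchSize, offset);
    total.addAll(results);
    // A full batch implies more records may remain; request the next offset.
    if (results.length == batchSize) return getFrom(offset + batchSize);
    return total;
  }

  return getFrom(0);
}

void main() async {
  final data = List.generate(120, (i) => i);
  final all = await getAllBatched(data, batchSize: 50);
  // Requests offsets 0, 50, and 100; the last batch returns 20 records and stops.
  print(all.length); // 120
}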