/// Runs [sql] in the worker using native batched streaming.
///
/// This path uses `odbc_stream_start_batched` + `odbc_stream_fetch`,
/// yielding chunks progressively. [maxBufferBytes] caps internal pending
/// bytes for message framing.
///
/// Throws [AsyncError] if the stream cannot be started, a fetch fails,
/// or the pending-byte cap is exceeded, and [FormatException] if the
/// worker ends the stream with an unframed partial message left over.
/// The worker-side stream is always closed, even on error or early
/// cancellation of the returned stream.
Stream<ParsedRowBuffer> streamQueryBatched(
  int connectionId,
  String sql, {
  int fetchSize = 1000,
  int chunkSize = 64 * 1024,
  int? maxBufferBytes,
}) async* {
  final streamId = await _streamStartBatched(
    connectionId,
    sql,
    fetchSize: fetchSize,
    chunkSize: chunkSize,
  );
  if (streamId == 0) {
    // A zero stream id signals a worker-side failure; surface its error
    // text when one is available.
    final workerError = await _safeGetWorkerError();
    throw AsyncError(
      code: AsyncErrorCode.queryFailed,
      message: workerError ?? 'Failed to start batched stream',
    );
  }
  // Accumulates raw bytes until at least one complete protocol message
  // can be framed. copy:false retains added chunks without copying them.
  final pending = BytesBuilder(copy: false);
  final limit = maxBufferBytes;
  try {
    while (true) {
      final fetched = await _streamFetch(streamId);
      if (!fetched.success) {
        final workerError = fetched.error ?? await _safeGetWorkerError();
        throw AsyncError(
          code: AsyncErrorCode.queryFailed,
          message: workerError ?? 'Batched stream fetch failed',
        );
      }
      final data = fetched.data;
      if (data != null && data.isNotEmpty) {
        pending.add(data);
        // The cap is checked before draining, so it bounds the raw bytes
        // buffered per fetch round, matching the documented contract.
        if (limit != null && pending.length > limit) {
          throw const AsyncError(
            code: AsyncErrorCode.queryFailed,
            message: 'Streaming buffer exceeded maxBufferBytes',
          );
        }
        // Drain every complete message currently buffered. takeBytes()
        // hands over the buffered bytes and clears the builder in one
        // step, replacing the quadratic toBytes()-copy-plus-rebuild
        // pattern; an incomplete tail is simply re-added.
        while (pending.length >= BinaryProtocolParser.headerSize) {
          final all = pending.takeBytes();
          final msgLen = BinaryProtocolParser.messageLengthFromHeader(all);
          if (all.length < msgLen) {
            // Not enough bytes for a full message yet; keep them pending
            // until the next fetch delivers the rest.
            pending.add(all);
            break;
          }
          yield BinaryProtocolParser.parse(all.sublist(0, msgLen));
          final remainder = all.sublist(msgLen);
          if (remainder.isNotEmpty) {
            pending.add(remainder);
          }
        }
      }
      if (!fetched.hasMore) {
        break;
      }
    }
    if (pending.isNotEmpty) {
      // Unframed trailing bytes mean the worker violated the protocol
      // (distinct from a query failure, hence FormatException).
      throw const FormatException(
        'Leftover bytes after stream; expected complete protocol messages',
      );
    }
  } finally {
    // Always release the worker-side stream resources.
    await _streamClose(streamId);
  }
}