/// Hybrid search with context assembly for LLM consumption.
///
/// Similar to the plain search-with-context flow, but retrieval is hybrid
/// (vector + BM25, fused into a single relevance score).
///
/// [adjacentChunks] — number of adjacent chunks to include before/after each
/// matched chunk (default: 0). Setting this to 1 includes the chunk
/// immediately before and after every match, which helps with long articles.
///
/// [singleSourceMode] — if true, only chunks from the most relevant source
/// are kept (and per-source headers are skipped during assembly).
Future<RagSearchResult> searchHybridWithContext(
  String query, {
  int topK = 10,
  int tokenBudget = 2000,
  ContextStrategy strategy = ContextStrategy.relevanceFirst,
  double vectorWeight = 0.2,
  double bm25Weight = 0.8,
  List<int>? sourceIds,
  int adjacentChunks = 0,
  bool singleSourceMode = false,
}) async {
  // Step 1: run the underlying hybrid (vector + BM25) retrieval.
  final rawHits = await searchHybrid(
    query,
    topK: topK,
    vectorWeight: vectorWeight,
    bm25Weight: bm25Weight,
    sourceIds: sourceIds,
  );

  // Step 2: adapt hybrid hits into ChunkSearchResult so the context builder
  // can consume them. Hybrid search returns content directly, so these are
  // minimal chunk records.
  var selected = <ChunkSearchResult>[
    for (final hit in rawHits)
      ChunkSearchResult(
        chunkId: hit.docId,
        sourceId: hit.sourceId,
        content: hit.content,
        chunkIndex: hit.chunkIndex, // Now available from Rust!
        chunkType: 'general', // Hybrid search doesn't return chunk type.
        similarity: hit.score, // Fused (RRF) score stands in for similarity.
        metadata: hit.metadata,
      ),
  ];

  // Step 3: restrict to the single most relevant source FIRST, so that the
  // adjacent-chunk expansion below only pulls neighbors from that source.
  if (singleSourceMode && selected.isNotEmpty) {
    selected = _filterToMostRelevantSource(selected, query);
  }

  // Step 4: optionally widen each match with its neighboring chunks.
  if (adjacentChunks > 0 && selected.isNotEmpty) {
    selected = await _expandWithAdjacentChunks(selected, adjacentChunks);
  }

  // Step 5: assemble the final context under the token budget; in
  // single-source mode the builder skips per-source headers.
  final assembled = ContextBuilder.build(
    searchResults: selected,
    tokenBudget: tokenBudget,
    strategy: strategy,
    singleSourceMode: singleSourceMode,
  );

  return RagSearchResult(chunks: selected, context: assembled);
}