compress static method

Future<CompressedContext> compress({
  required List<ChunkSearchResult> chunks,
  CompressionLevel level = CompressionLevel.balanced,
  int maxTokens = 2000,
  String language = 'ko',
})

Compresses the given chunks into LLM context that fits within a token budget.

chunks - Search result chunks to compress.
level - Compression aggressiveness.
maxTokens - Target maximum tokens (uses a ~4 characters per token estimate).
language - Language for stopword filtering ('ko' or 'en').
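A minimal usage sketch. The enclosing class name ContextCompressor and the searchResults variable are assumptions for illustration; only the static method itself appears on this page.

// `searchResults` is a hypothetical List<ChunkSearchResult> obtained from a prior search call.
final compressed = await ContextCompressor.compress(
  chunks: searchResults,
  level: CompressionLevel.balanced,
  maxTokens: 1500, // ~6000 characters under the 4-chars-per-token estimate
  language: 'en',
);

// Place compressed.text into the LLM prompt and inspect the savings.
print('compression ratio: ${compressed.ratio}');
print('estimated tokens saved: ${compressed.estimatedTokensSaved}');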

Implementation

static Future<CompressedContext> compress({
  required List<ChunkSearchResult> chunks,
  CompressionLevel level = CompressionLevel.balanced,
  int maxTokens = 2000,
  String language = 'ko',
}) async {
  if (chunks.isEmpty) {
    return const CompressedContext(
      text: '',
      originalChars: 0,
      compressedChars: 0,
      ratio: 1.0,
      estimatedTokensSaved: 0,
      includedChunks: [],
    );
  }

  // Combine all chunk content
  final originalText = chunks.map((c) => c.content).join('\n\n');
  final originalChars = originalText.length;

  // Convert level to int for Rust
  final levelInt = level.index;

  // Calculate max chars from token budget (~4 chars per token)
  final maxChars = maxTokens * 4;

  // Create compression options
  final options = rust.CompressionOptions(
    removeStopwords: false, // Disabled: removing stopwords degrades the LLM context
    removeDuplicates: true,
    language: language,
    level: levelInt,
  );

  // Call Rust compression
  final result = await rust.compressText(
    text: originalText,
    maxChars: maxChars,
    options: options,
  );

  // Calculate token savings (rough estimate)
  final originalTokens = (originalChars / 4).ceil();
  final compressedTokens = (result.compressedChars / 4).ceil();
  final tokensSaved = originalTokens - compressedTokens;

  return CompressedContext(
    text: result.text,
    originalChars: result.originalChars,
    compressedChars: result.compressedChars,
    ratio: result.ratio,
    estimatedTokensSaved: tokensSaved,
    includedChunks: chunks,
  );
}
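For reference, a sketch of the CompressedContext value this method returns, inferred from the fields used above; the exact declaration in the package may differ.

// Sketch only: field names and types inferred from the implementation above.
class CompressedContext {
  final String text;                // compressed text to place in the LLM prompt
  final int originalChars;          // character count before compression
  final int compressedChars;        // character count after compression
  final double ratio;               // compression ratio reported by the Rust layer (assumed meaning)
  final int estimatedTokensSaved;   // difference of the ~chars/4 token estimates
  final List<ChunkSearchResult> includedChunks; // chunks that contributed to the context

  const CompressedContext({
    required this.text,
    required this.originalChars,
    required this.compressedChars,
    required this.ratio,
    required this.estimatedTokensSaved,
    required this.includedChunks,
  });
}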