dart_ollama 0.1.5 copy "dart_ollama: ^0.1.5" to clipboard
dart_ollama: ^0.1.5 copied to clipboard

Discontinued — replaced by: llm_ollama

A Dart package for interacting with Ollama and ChatGPT APIs. Supports streaming chat responses, tool/function calling, image support, and both Ollama and ChatGPT backends.

example/main.dart

import 'dart:io';

import 'package:dart_ollama/dart_ollama.dart';

/// Basic Chat Example
///
/// This example demonstrates:
/// - Setting up a chat repository
/// - Pulling models if they don't exist
/// - Streaming chat responses with thinking
/// - Displaying progress bars for model downloads
/// Entry point: verifies the model is installed, then runs the two chat
/// demos (with and without thinking mode) back to back.
Future<void> main() async {
  print('šŸ’¬ Dart Ollama Basic Chat Example\n');

  // Both repositories talk to the same local Ollama instance.
  const endpoint = 'http://localhost:11434';
  const model = 'qwen3:0.6b'; // Small model with thinking and tool support

  final chatRepository = OllamaChatRepository(baseUrl: endpoint);
  final ollamaRepository = OllamaRepository(baseUrl: endpoint);

  // Pull the model first if it is not installed locally.
  await _ensureModelAvailable(ollamaRepository, model);

  print('\nšŸš€ Starting conversation...\n');

  // Demo 1: thinking mode enabled.
  await _runBasicChatExample(chatRepository, model);

  print('\n${'=' * 50}\n');

  // Demo 2: thinking mode disabled, for comparison.
  await _runChatWithoutThinking(chatRepository, model);

  print('\nāœ… Examples completed successfully!');
}

/// Demonstrates chat with thinking enabled.
///
/// Streams the answer chunk-by-chunk. Chunks are partial tokens, so they are
/// emitted with [stdout.write] rather than [print] — `print` would insert a
/// newline after every fragment and garble the streamed output.
Future<void> _runBasicChatExample(
  OllamaChatRepository chatRepository,
  String model,
) async {
  print('🧠 Example 1: Chat with Thinking Mode');
  print('Question: Why is the sky blue?');

  final stream = chatRepository.streamChat(
    model,
    messages: [
      LLMMessage(
        role: LLMRole.system,
        content:
            'You are a helpful assistant. Answer questions accurately and concisely. '
            'Show your reasoning process when thinking mode is enabled.',
      ),
      LLMMessage(role: LLMRole.user, content: 'Why is the sky blue?'),
    ],
    think: true, // Enable thinking mode to see the model's reasoning
  );

  // StringBuffer avoids repeated string reallocation while accumulating.
  final thinkingContent = StringBuffer();
  final responseContent = StringBuffer();

  print('\nšŸ¤– Streaming response...\n');

  await for (final chunk in stream) {
    final message = chunk.message;

    // Collect thinking content (reasoning process) — not displayed live.
    final thinking = message?.thinking;
    if (thinking != null) {
      thinkingContent.write(thinking);
    }

    // Collect and display response content as it streams, with no
    // per-chunk newline so the text flows naturally.
    final content = message?.content;
    if (content != null) {
      responseContent.write(content);
      stdout.write(content);
    }
  }

  print('\n\nšŸ¤” Model\'s thinking process:');
  print(
    thinkingContent.isEmpty
        ? '(No thinking output available)'
        : thinkingContent.toString(),
  );

  print('\nšŸ“ Complete response:');
  print(responseContent);
}

/// Demonstrates chat without thinking for comparison.
///
/// Streams the answer chunk-by-chunk. Chunks are partial tokens, so they are
/// emitted with [stdout.write] rather than [print] — `print` would insert a
/// newline after every fragment and garble the streamed output.
Future<void> _runChatWithoutThinking(
  OllamaChatRepository chatRepository,
  String model,
) async {
  print('šŸ’­ Example 2: Chat without Thinking Mode');
  print('Question: What is machine learning?');

  final stream = chatRepository.streamChat(
    model,
    messages: [
      LLMMessage(
        role: LLMRole.system,
        content:
            'You are a helpful assistant. Provide clear, concise explanations.',
      ),
      LLMMessage(role: LLMRole.user, content: 'What is machine learning?'),
    ],
    think: false, // Disable thinking mode for faster, direct responses
  );

  // StringBuffer avoids repeated string reallocation while accumulating.
  final responseContent = StringBuffer();

  print('\nšŸ¤– Streaming response...\n');

  await for (final chunk in stream) {
    final content = chunk.message?.content;
    if (content != null) {
      responseContent.write(content);
      // No per-chunk newline so the streamed text flows naturally.
      stdout.write(content);
    }
  }

  print('\n\nšŸ“ Complete response:');
  print(responseContent);
}

/// Ensures the specified model is available, pulling it if necessary.
///
/// Checks the installed model list first; if [modelName] is missing it is
/// pulled, redrawing a single progress-bar line in place (via `\r`) instead
/// of printing one line per progress event.
///
/// Throws an [Exception] if the model list cannot be fetched or the pull
/// fails (e.g. Ollama is not running).
Future<void> _ensureModelAvailable(
  OllamaRepository repository,
  String modelName,
) async {
  try {
    final models = await repository.models();
    if (models.any((model) => model.name == modelName)) {
      print('āœ… Model $modelName is already available.');
      return; // Early exit: nothing to download.
    }

    print('šŸ“„ Model $modelName not found. Pulling...');
    print('ā³ This may take a few minutes for the first download.');

    await for (final progress in repository.pullModel(modelName)) {
      final statusLine = progress.status;
      if (progress.total != null && progress.completed != null) {
        final percentage = (progress.progress * 100).toStringAsFixed(1);
        final bar = _buildProgressBar(progress.progress, 30);
        // '\r' rewinds to the start of the line so the bar updates in
        // place rather than spamming a new line per event.
        stdout.write('\r$statusLine $bar $percentage%');
      } else {
        // Status-only events (no byte counts) get their own line.
        print(statusLine);
      }
    }
    print('\nāœ… Model $modelName downloaded successfully!');
  } catch (e) {
    print('āŒ Error checking/pulling model: $e');
    print(
      'šŸ’” Make sure Ollama is running and accessible at http://localhost:11434',
    );
    throw Exception('Model setup failed: $e');
  }
}

/// Creates a visual progress bar for model downloads.
///
/// [progress] is a fraction in `[0.0, 1.0]`; it is clamped so out-of-range
/// values (e.g. a server reporting completed > total) cannot produce a
/// negative repeat count, which would throw at runtime.
/// [length] is the number of character cells inside the brackets.
String _buildProgressBar(double progress, int length) {
  final clamped = progress.clamp(0.0, 1.0);
  final filledLength = (clamped * length).floor();
  final emptyLength = length - filledLength;
  return '[${'=' * filledLength}${' ' * emptyLength}]';
}
1
likes
150
points
18
downloads

Publisher

unverified uploader

Weekly Downloads

A Dart package for interacting with Ollama and ChatGPT APIs. Supports streaming chat responses, tool/function calling, image support, and both Ollama and ChatGPT backends.

Documentation

API reference

License

MIT (license)

Dependencies

http

More

Packages that depend on dart_ollama