lora_sdk 1.0.3
lora_sdk: ^1.0.3 copied to clipboard
LoraSDK is a Flutter package that enables easy integration of local Large Language Models (LLM) into your Flutter applications. It provides a streamlined interface for managing and running LLM models [...]
Lora SDK #
LoraSDK is a Flutter package that enables easy integration of local Large Language Models (LLM) into your Flutter applications. It provides a streamlined interface for managing and running LLM models locally on device.
Features #
- Local LLM model execution
- Streaming text generation
- Model download and warmup progress monitoring
- Built-in state management
- Performance metrics collection
- Error handling
Installation #
To install, run the following command:
flutter pub add lora_sdk
And add these dependencies to your pubspec.yaml:
dependencies:
lora_sdk: ^1.0.3
fllama:
git:
url: https://github.com/Telosnex/fllama.git
ref: main
Getting Started #
Initialize SDK #
// Wrap your LLM backend (here: fllama) in the SDK's provider interface.
final myLlmProvider = FllamaWrapper();
// Initialize the SDK with your license key and the provider.
final sdk = await LoraSdk.initialize(
licenseKey: 'your-license-key',
llmProvider: myLlmProvider,
);
Basic Usage #
// Download model if not already downloaded
// Download model if not already downloaded.
// onProgress reports a fraction in [0, 1]; onError receives a message string.
await sdk.downloadModel(
onProgress: (progress) => print('Download progress: ${progress * 100}%'),
onError: (error) => print('Error: $error'),
);
// Warmup the model (must complete before generation).
await sdk.warmup(
onProgress: (progress) => print('Warmup progress: ${progress * 100}%'),
onError: (error) => print('Error: $error'),
);
// Generate text in one shot.
final response = await sdk.generateResponse("Your input text");
print(response);
// Or use the streaming variant, which emits chunks as they are generated.
sdk.generateStreamResponse("Your input text")
.listen((chunk) => print(chunk));
Complete Example #
Here's a complete example showing how to integrate LoraSDK in a Flutter application:
main.dart #
import 'package:flutter/material.dart';
import 'package:lora_sdk/lora_sdk.dart';
import 'llmprovider_implementation.dart';
/// Entry point: launches the demo page inside a minimal [MaterialApp].
void main() {
  runApp(const MaterialApp(home: SdkTestPage()));
}
/// Demo page exercising the Lora SDK: initialization, model download,
/// warmup, and both block and streaming text generation.
class SdkTestPage extends StatefulWidget {
const SdkTestPage({super.key});
@override
State<SdkTestPage> createState() => _SdkTestPageState();
}
/// State for [SdkTestPage].
///
/// Drives the SDK lifecycle (initialize -> download -> warmup -> generate)
/// and renders controls gated on [LoraState]. All async callbacks check
/// [mounted] before touching [State.setState] or [BuildContext], because
/// every SDK call can complete after this widget has been disposed.
class _SdkTestPageState extends State<SdkTestPage> {
  // Null until initialization succeeds; the UI shows a spinner meanwhile.
  LoraSdk? _sdk;
  String _streamResponse = '';
  String _blockResponse = '';
  final TextEditingController _inputController = TextEditingController();

  @override
  void initState() {
    super.initState();
    _initializeSdk();
  }

  /// Creates the LLM provider and initializes the SDK with the license key.
  Future<void> _initializeSdk() async {
    try {
      final myLlmProvider = FllamaWrapper();
      final sdk = await LoraSdk.initialize(
        // Use the license key from your dashboard; never publish a real key.
        licenseKey: 'your-license-key',
        llmProvider: myLlmProvider,
      );
      // The widget may have been disposed while we were awaiting.
      if (!mounted) return;
      setState(() => _sdk = sdk);
    } catch (e) {
      _showError('Initialization failed: $e');
    }
  }

  /// Downloads the model; errors surface via [_showError] through both the
  /// SDK's onError callback and the surrounding try/catch.
  Future<void> _downloadModel() async {
    if (_sdk == null) return;
    try {
      await _sdk!.downloadModel(
        onProgress: (_) {}, // Progress is rendered via loraState instead.
        onError: _showError,
      );
    } catch (e) {
      _showError('Download failed: $e');
    }
  }

  /// Warms the model up so generation calls are available.
  Future<void> _warmup() async {
    if (_sdk == null) return;
    try {
      await _sdk!.warmup(
        onProgress: (_) {}, // Progress is rendered via loraState instead.
        onError: _showError,
      );
    } catch (e) {
      _showError('Warmup failed: $e');
    }
  }

  /// Streams a response, appending each chunk to [_streamResponse].
  Future<void> _generateStreamResponse() async {
    if (_sdk == null || _inputController.text.isEmpty) return;
    setState(() => _streamResponse = '');
    try {
      await for (final chunk
          in _sdk!.generateStreamResponse(_inputController.text)) {
        // Stop consuming the stream if the widget is gone.
        if (!mounted) return;
        setState(() => _streamResponse += chunk);
      }
    } catch (e) {
      _showError('Stream generation failed: $e');
    }
  }

  /// Generates a full response in one call and stores it in [_blockResponse].
  Future<void> _generateBlockResponse() async {
    if (_sdk == null || _inputController.text.isEmpty) return;
    setState(() => _blockResponse = '');
    try {
      final response = await _sdk!.generateResponse(_inputController.text);
      if (!mounted) return;
      setState(() => _blockResponse = response);
    } catch (e) {
      _showError('Block generation failed: $e');
    }
  }

  /// Shows [error] in a snack bar.
  ///
  /// Guarded by [mounted]: every caller sits behind an async gap, so the
  /// context may no longer be attached to the tree when an error arrives.
  void _showError(String error) {
    if (!mounted) return;
    ScaffoldMessenger.of(context).showSnackBar(SnackBar(content: Text(error)));
  }

  @override
  Widget build(BuildContext context) {
    if (_sdk == null) {
      return Scaffold(
        appBar: AppBar(title: const Text('Lora SDK Test')),
        body: const Center(child: CircularProgressIndicator()),
      );
    }
    return Scaffold(
      appBar: AppBar(title: const Text('Lora SDK Test')),
      body: Padding(
        padding: const EdgeInsets.all(16),
        child: Column(
          crossAxisAlignment: CrossAxisAlignment.stretch,
          children: [
            // Rebuilds the controls whenever the SDK publishes a new state.
            ValueListenableBuilder<LoraState>(
              valueListenable: _sdk!.loraState,
              builder: (context, state, _) {
                return Column(
                  crossAxisAlignment: CrossAxisAlignment.stretch,
                  children: [
                    Text('Status: ${_getStatusMessage(state)}'),
                    if (state.downloadProgress != null ||
                        state.warmupProgress != null)
                      LinearProgressIndicator(
                        value: state.downloadProgress ?? state.warmupProgress,
                      ),
                    const SizedBox(height: 16),
                    // Enabled only when initialized and not yet downloaded.
                    ElevatedButton(
                      onPressed:
                          (state.modelStatus != ModelStatus.downloading &&
                                  !state.isModelDownloaded &&
                                  state.isInitialized)
                              ? _downloadModel
                              : null,
                      child: const Text('Download Model'),
                    ),
                    const SizedBox(height: 8),
                    // Enabled only when downloaded and not yet warmed up.
                    ElevatedButton(
                      onPressed: state.modelStatus != ModelStatus.warming &&
                              state.isModelDownloaded &&
                              !state.isWarmedUp
                          ? _warmup
                          : null,
                      child: const Text('Start Warmup'),
                    ),
                    const SizedBox(height: 16),
                    TextField(
                      controller: _inputController,
                      decoration: const InputDecoration(
                        border: OutlineInputBorder(),
                        labelText: 'Input Text',
                      ),
                    ),
                    const SizedBox(height: 8),
                    Row(
                      children: [
                        Expanded(
                          child: ElevatedButton(
                            onPressed: state.isWarmedUp
                                ? _generateStreamResponse
                                : null,
                            child: const Text('Generate Stream Response'),
                          ),
                        ),
                        const SizedBox(width: 8),
                        Expanded(
                          child: ElevatedButton(
                            onPressed: state.isWarmedUp
                                ? _generateBlockResponse
                                : null,
                            child: const Text('Generate Block Response'),
                          ),
                        ),
                      ],
                    ),
                  ],
                );
              },
            ),
            const SizedBox(height: 16),
            // Two tabs showing the streaming and block results side by side.
            Expanded(
              child: DefaultTabController(
                length: 2,
                child: Column(
                  children: [
                    const TabBar(
                      labelColor: Colors.blue,
                      tabs: [
                        Tab(text: 'Stream Response'),
                        Tab(text: 'Block Response'),
                      ],
                    ),
                    Expanded(
                      child: TabBarView(
                        children: [
                          SingleChildScrollView(
                            child: Padding(
                              padding: const EdgeInsets.all(8.0),
                              child: Text(_streamResponse),
                            ),
                          ),
                          SingleChildScrollView(
                            child: Padding(
                              padding: const EdgeInsets.all(8.0),
                              child: Text(_blockResponse),
                            ),
                          ),
                        ],
                      ),
                    ),
                  ],
                ),
              ),
            ),
          ],
        ),
      ),
    );
  }

  /// Maps the current [LoraState] to a human-readable status line.
  ///
  /// Progress values default to 0 instead of being force-unwrapped: the
  /// status can flip to downloading/warming before the first progress
  /// callback fires, in which case the nullable progress is still null.
  String _getStatusMessage(LoraState state) {
    switch (state.modelStatus) {
      case ModelStatus.notInitialized:
        return 'SDK initialization required';
      case ModelStatus.initializing:
        return 'Initializing SDK...';
      case ModelStatus.downloading:
        return 'Downloading model... '
            '${((state.downloadProgress ?? 0) * 100).toInt()}%';
      case ModelStatus.warming:
        return 'Warming up... ${((state.warmupProgress ?? 0) * 100).toInt()}%';
      case ModelStatus.ready:
        if (!state.isModelDownloaded) {
          return 'SDK initialized. Model download required';
        } else if (!state.isWarmedUp) {
          return 'Model downloaded. Warmup required';
        } else {
          return 'Ready';
        }
      case ModelStatus.error:
        return 'Error: ${state.errorMessage}';
    }
  }

  @override
  void dispose() {
    _inputController.dispose();
    super.dispose();
  }
}
llmprovider_implementation.dart #
import 'package:lora_sdk/lora_sdk.dart' as sdk;
import 'package:fllama/fllama.dart';
/// Adapts the fllama package to the SDK's [sdk.LLMInterface].
///
/// The SDK and fllama both define `OpenAiRequest`/`Message`/`Role` types;
/// this wrapper translates SDK-side values into fllama equivalents before
/// delegating to the fllama top-level functions.
class FllamaWrapper implements sdk.LLMInterface<sdk.OpenAiRequest> {
  /// Builds a fllama [OpenAiRequest] from an SDK request.
  ///
  /// Takes the whole request object rather than ten positional parameters:
  /// the previous form duplicated a long, order-sensitive argument list at
  /// every call site, which is easy to get wrong silently (several adjacent
  /// parameters share types, e.g. the three penalty/topP doubles).
  OpenAiRequest _convertRequest(sdk.OpenAiRequest request) {
    return OpenAiRequest(
      maxTokens: request.maxTokens,
      messages: request.messages
          .map((m) => Message(_convertRole(m.role), m.text))
          .toList(),
      numGpuLayers: request.numGpuLayers,
      modelPath: request.modelPath,
      frequencyPenalty: request.frequencyPenalty,
      presencePenalty: request.presencePenalty,
      topP: request.topP,
      contextSize: request.contextSize,
      temperature: request.temperature,
      logger: request.logger,
    );
  }

  /// Translates the SDK role enum to fllama's [Role].
  Role _convertRole(sdk.Role role) {
    switch (role) {
      case sdk.Role.assistant:
        return Role.assistant;
      case sdk.Role.system:
        return Role.system;
      case sdk.Role.user:
        return Role.user;
    }
  }

  @override
  Future<int> chat(
      sdk.OpenAiRequest request, Function(String, bool) onResponse) {
    return fllamaChat(_convertRequest(request), onResponse);
  }

  @override
  Future<int> chatMlcWeb(sdk.OpenAiRequest request,
      Function(double, double) onProgress, Function(String, bool) onResponse) {
    return fllamaChatMlcWeb(_convertRequest(request), onProgress, onResponse);
  }

  @override
  void cancelInference(int requestId) => fllamaCancelInference(requestId);
}
State Management #
The SDK provides a LoraState class to track the current state:
/// Lifecycle phase of the local model, as reported in [LoraState].
enum ModelStatus {
notInitialized, // SDK/model has not been initialized yet
initializing, // Initialization is in progress
ready, // Model is ready for use
downloading, // Model download is in progress
warming, // Model warmup is in progress
error // An error occurred; see LoraState.errorMessage
}
/// Snapshot of the SDK's current state, published via a ValueListenable.
class LoraState {
final ModelStatus modelStatus; // Current lifecycle phase
final bool isInitialized; // Whether the SDK has been initialized
final bool isWarmedUp; // Whether warmup is completed
final bool isModelDownloaded; // Whether the model is downloaded
final double? downloadProgress; // Download progress, or null when not downloading
final double? warmupProgress; // Warmup progress, or null when not warming
final String? errorMessage; // Error message, or null when no error
}
Monitor state changes using ValueListenableBuilder:
// Rebuilds the subtree each time the SDK publishes a new LoraState.
ValueListenableBuilder<LoraState>(
valueListenable: sdk.loraState,
builder: (context, state, _) {
return Text('Current status: ${state.modelStatus}');
},
);
License #
This SDK requires a license key to operate. You can obtain your license key by logging into our website:
- Visit our website
- Log in to your account (or create one if you haven't already)
- Navigate to the dashboard
- Find your license key in the "API Keys" section
Each license key is tied to your account and has specific usage limitations. Please refer to our website for detailed licensing terms and conditions.
Requirements #
- Flutter SDK: >=3.0.0
- Dart SDK: >=3.0.0
- iOS: >=13.0
- Android: minSdk >=23
- Supported platforms: iOS, Android
Additional Information #
For bug reports and feature requests, please file an issue on our GitHub repository.