ml_linalg 8.0.0

SIMD-based linear algebra with Dart for machine learning purposes

Vectors

A couple of words about the underlying vector architecture

All vector operations are backed by a SIMD (single instruction, multiple data) computation architecture. In fact, the main purpose of the library is to connect this powerful computation model with pure math. So, the library contains a high-performance SIMD vector class based on Float32x4 - Float32x4Vector. Most operations in the vector class are performed on four elements at once: this kind of parallelism is achieved through special 128-bit processor registers, which are used directly by the program code. For a better understanding of the topic, please read the article. It is also possible to implement a Float64x2-based version of the vector using the existing codebase, but so far there has been no need to do so. The Float32x4Vector class is hidden from the library's users; you can create a Float32x4Vector instance via the Vector factory (see examples below).
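
For illustration only, here is a minimal sketch (using dart:typed_data directly, not the library's API) of how a single Float32x4 operation processes four values at once:

  import 'dart:typed_data';

  // Each Float32x4 packs four 32-bit floats into one 128-bit SIMD value.
  final lanesA = Float32x4(1.0, 2.0, 3.0, 4.0);
  final lanesB = Float32x4(10.0, 20.0, 30.0, 40.0);

  // A single SIMD addition processes all four lanes at once.
  final sum = lanesA + lanesB;

  print([sum.x, sum.y, sum.z, sum.w]); // [11.0, 22.0, 33.0, 44.0]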

Vector operations examples

At the present moment, the most common vector operations are implemented:

Vectors sum
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([1.0, 2.0, 3.0, 4.0, 5.0]);
  final vector2 = Vector.from([2.0, 3.0, 4.0, 5.0, 6.0]);
  final result = vector1 + vector2;
  print(result.toList()); // [3.0, 5.0, 7.0, 9.0, 11.0]
Vectors subtraction
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([4.0, 5.0, 6.0, 7.0, 8.0]);
  final vector2 = Vector.from([2.0, 3.0, 2.0, 3.0, 2.0]);
  final result = vector1 - vector2;
  print(result.toList()); // [2.0, 2.0, 4.0, 4.0, 6.0]
Element wise vector by vector multiplication
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([1.0, 2.0, 3.0, 4.0, 5.0]);
  final vector2 = Vector.from([2.0, 3.0, 4.0, 5.0, 6.0]);
  final result = vector1 * vector2;
  print(result.toList()); // [2.0, 6.0, 12.0, 20.0, 30.0]
Element wise vector by vector division
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([6.0, 12.0, 24.0, 48.0, 96.0]);
  final vector2 = Vector.from([3.0, 4.0, 6.0, 8.0, 12.0]);
  final result = vector1 / vector2;
  print(result.toList()); // [2.0, 3.0, 4.0, 6.0, 8.0]
Euclidean norm
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([2.0, 3.0, 4.0, 5.0, 6.0]);
  final result = vector1.norm();
  print(result); // sqrt(2^2 + 3^2 + 4^2 + 5^2 + 6^2) = sqrt(90) ≈ 9.48
Manhattan norm
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([2.0, 3.0, 4.0, 5.0, 6.0]);
  final result = vector1.norm(Norm.manhattan);
  print(result); // 2 + 3 + 4 + 5 + 6 = 20.0
Mean value
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([2.0, 3.0, 4.0, 5.0, 6.0]);
  final result = vector1.mean();
  print(result); // (2 + 3 + 4 + 5 + 6) / 5 = 4.0
Sum of all vector elements
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([2.0, 3.0, 4.0, 5.0, 6.0]);
  final result = vector1.sum();
  print(result); // 2 + 3 + 4 + 5 + 6 = 20.0 (equivalent to Manhattan norm)
Dot product of two vectors
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([1.0, 2.0, 3.0, 4.0, 5.0]);
  final vector2 = Vector.from([2.0, 3.0, 4.0, 5.0, 6.0]);
  final result = vector1.dot(vector2);
  print(result); // 1.0 * 2.0 + 2.0 * 3.0 + 3.0 * 4.0 + 4.0 * 5.0 + 5.0 * 6.0 = 70.0
Sum of a vector and a scalar
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([1.0, 2.0, 3.0, 4.0, 5.0]);
  final scalar = 5.0;
  final result = vector1 + scalar;
  print(result.toList()); // [6.0, 7.0, 8.0, 9.0, 10.0]
Subtraction of a scalar from a vector
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([1.0, 2.0, 3.0, 4.0, 5.0]);
  final scalar = 5.0;
  final result = vector1 - scalar;
  print(result.toList()); // [-4.0, -3.0, -2.0, -1.0, 0.0]
Multiplication (scaling) of a vector by a scalar
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([1.0, 2.0, 3.0, 4.0, 5.0]);
  final scalar = 5.0;
  final result = vector1 * scalar;
  print(result.toList()); // [5.0, 10.0, 15.0, 20.0, 25.0]
Division (scaling) of a vector by a scalar value
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([25.0, 50.0, 75.0, 100.0, 125.0]);
  final scalar = 5.0;
  final result = vector1.scalarDiv(scalar);
  print(result.toList()); // [5.0, 10.0, 15.0, 20.0, 25.0]
Euclidean distance between two vectors
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([1.0, 2.0, 3.0, 4.0, 5.0]);
  final vector2 = Vector.from([2.0, 3.0, 4.0, 5.0, 6.0]);
  final result = vector1.distanceTo(vector2);
  print(result); // ≈ 2.23
Manhattan distance between two vectors
  import 'package:ml_linalg/linalg.dart';

  final vector1 = Vector.from([1.0, 2.0, 3.0, 4.0, 5.0]);
  final vector2 = Vector.from([2.0, 3.0, 4.0, 5.0, 6.0]);
  final result = vector1.distanceTo(vector2, Norm.manhattan);
  print(result); // 5.0
Vector normalization using Euclidean norm
  import 'package:ml_linalg/linalg.dart';

  final vector = Vector.from([1.0, 2.0, 3.0, 4.0, 5.0]);
  final result = vector.normalize(Norm.euclidean);
  print(result); // [0.134, 0.269, 0.404, 0.539, 0.674]
Vector normalization using Manhattan norm
  import 'package:ml_linalg/linalg.dart';

  final vector = Vector.from([1.0, -2.0, 3.0, -4.0, 5.0]);
  final result = vector.normalize(Norm.manhattan);
  print(result); // [0.066, -0.133, 0.200, -0.266, 0.333]
Vector rescaling (min-max normalization)
  import 'package:ml_linalg/linalg.dart';

  final vector = Vector.from([1.0, -2.0, 3.0, -4.0, 5.0, 0.0]);
  final result = vector.rescale();
  print(result); // [0.555, 0.222, 0.777, 0.0, 1.0, 0.444]
Fast map

Performs mapping from one vector to another in an efficient way (using SIMD computations)

  import 'package:ml_linalg/linalg.dart';

  final vector = Vector.from([1.0, 2.0, 3.0, 4.0, 5.0]);
  final result = vector.fastMap<Float32x4>((Float32x4 element, int offsetStart, int offsetEnd) {
    // offsetStart - start index of the current vectorized element, e.g. if `element` is the second one
    // in the inner collection, offsetStart will be 4 (because a Float32x4 contains 4 elements)
    // offsetEnd - end index of the current vectorized element, e.g. if `element` is the second one
    // in the inner collection, offsetEnd will be 7
    return element.scale(2.0);
  });
  print(result); // [2.0, 4.0, 6.0, 8.0, 10.0]

Matrices

A matrix class is also available. It is based on the Float32x4 and Float32x4Vector types.
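
As a minimal sketch (using only the Matrix factory shown throughout this document), a matrix can be created from a list of rows:

  import 'package:ml_linalg/linalg.dart';

  // Each row is backed by the SIMD-based vector implementation described above.
  final matrix = Matrix.from([
    [1.0, 2.0, 3.0, 4.0],
    [5.0, 6.0, 7.0, 8.0],
  ]);
  print(matrix);
  // [
  //  [1.0, 2.0, 3.0, 4.0],
  //  [5.0, 6.0, 7.0, 8.0],
  // ]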

Matrix operations examples

Sum of a matrix and another matrix
import 'package:ml_linalg/linalg.dart';

final matrix1 = Matrix.from([
  [1.0, 2.0, 3.0, 4.0],
  [5.0, 6.0, 7.0, 8.0],
  [9.0, .0, -2.0, -3.0],
]);
final matrix2 = Matrix.from([
  [10.0, 20.0, 30.0, 40.0],
  [-5.0, 16.0, 2.0, 18.0],
  [2.0, -1.0, -2.0, -7.0],
]);
print(matrix1 + matrix2);
// [
//  [11.0, 22.0, 33.0, 44.0],
//  [0.0, 22.0, 9.0, 26.0],
//  [11.0, -1.0, -4.0, -10.0],
// ];
Sum of a matrix and a scalar
import 'package:ml_linalg/linalg.dart';

final matrix = Matrix.from([
  [1.0, 2.0, 3.0, 4.0],
  [5.0, 6.0, 7.0, 8.0],
  [9.0, .0, -2.0, -3.0],
]);
print(matrix + 7);
//  [
//    [8.0, 9.0, 10.0, 11.0],
//    [12.0, 13.0, 14.0, 15.0],
//    [16.0, 7.0, 5.0, 4.0],
//  ];
Multiplication of a matrix and a vector
  import 'package:ml_linalg/linalg.dart';

  final matrix = Matrix.from([
    [1.0, 2.0, 3.0, 4.0],
    [5.0, 6.0, 7.0, 8.0],
    [9.0, .0, -2.0, -3.0],
  ]);
  final vector = Vector.from([2.0, 3.0, 4.0, 5.0]);
  final result = matrix * vector;
  print(result); 
  // a column vector [
  //  [40],
  //  [96],
  //  [-5],
  //]
Multiplication of a matrix and another matrix
  import 'package:ml_linalg/linalg.dart';

  final matrix1 = Matrix.from([
    [1.0, 2.0, 3.0, 4.0],
    [5.0, 6.0, 7.0, 8.0],
    [9.0, .0, -2.0, -3.0],
  ]);
  final matrix2 = Matrix.from([
    [1.0, 2.0],
    [5.0, 6.0],
    [9.0, .0],
    [-9.0, 1.0],
  ]);
  final result = matrix1 * matrix2;
  print(result);
  //[
  // [2.0, 18.0],
  // [26.0, 54.0],
  // [18.0, 15.0],
  //]
Multiplication of a matrix and a scalar
import 'package:ml_linalg/linalg.dart';

final matrix = Matrix.from([
  [1.0, 2.0, 3.0, 4.0],
  [5.0, 6.0, 7.0, 8.0],
  [9.0, .0, -2.0, -3.0],
]);
print(matrix * 3);
// [
//   [3.0, 6.0, 9.0, 12.0],
//   [15.0, 18.0, 21.0, 24.0],
//   [27.0, .0, -6.0, -9.0],
// ];
Element wise matrix subtraction
import 'package:ml_linalg/linalg.dart';

final matrix1 = Matrix.from([
  [1.0, 2.0, 3.0, 4.0],
  [5.0, 6.0, 7.0, 8.0],
  [9.0, .0, -2.0, -3.0],
]);
final matrix2 = Matrix.from([
  [10.0, 20.0, 30.0, 40.0],
  [-5.0, 16.0, 2.0, 18.0],
  [2.0, -1.0, -2.0, -7.0],
]);
print(matrix1 - matrix2);
// [
//   [-9.0, -18.0, -27.0, -36.0],
//   [10.0, -10.0, 5.0, -10.0],
//   [7.0, 1.0, .0, 4.0],
// ];
Matrix transposition
  import 'package:ml_linalg/linalg.dart';
  
  final matrix = Matrix.from([
    [1.0, 2.0, 3.0, 4.0],
    [5.0, 6.0, 7.0, 8.0],
    [9.0, .0, -2.0, -3.0],
  ]);
  final result = matrix.transpose();
  print(result);
  //[
  // [1.0, 5.0, 9.0],
  // [2.0, 6.0, .0],
  // [3.0, 7.0, -2.0],
  // [4.0, 8.0, -3.0],
  //]
Matrix row wise reduce
  import 'package:ml_linalg/linalg.dart';

  final matrix = Matrix.from([
    [1.0, 2.0, 3.0, 4.0],
    [5.0, 6.0, 7.0, 8.0],
  ]); 
  final reduced = matrix.reduceRows((combine, row) => combine + row);
  print(reduced); // [6.0, 8.0, 10.0, 12.0]
Matrix column wise reduce
  import 'package:ml_linalg/linalg.dart';

  final matrix = Matrix.from([
    [11.0, 12.0, 13.0, 14.0],
    [15.0, 16.0, 17.0, 18.0],
    [21.0, 22.0, 23.0, 24.0],
  ]);
  final result = matrix.reduceColumns((combine, vector) => combine + vector);
  print(result); // [50, 66, 90]
Matrix row wise map
  import 'package:ml_linalg/linalg.dart';

  final matrix = Matrix.from([
    [1.0, 2.0, 3.0, 4.0],
    [5.0, 6.0, 7.0, 8.0],
  ]); 
  final modifier = Vector.filled(4, 2.0);
  final newMatrix = matrix.rowsMap((row) => row + modifier);
  print(newMatrix); 
  // [
  //  [3.0, 4.0, 5.0, 6.0],
  //  [7.0, 8.0, 9.0, 10.0],
  // ]
Matrix column wise map
  import 'package:ml_linalg/linalg.dart';

  final matrix = Matrix.from([
    [1.0, 2.0, 3.0, 4.0],
    [5.0, 6.0, 7.0, 8.0],
  ]); 
  final modifier = Vector.filled(2, 2.0);
  final newMatrix = matrix.columnsMap((column) => column + modifier);
  print(newMatrix); 
  // [
  //  [3.0, 4.0, 5.0, 6.0],
  //  [7.0, 8.0, 9.0, 10.0],
  // ]
Submatrix (taking a lower-dimensional matrix from the current matrix)
  import 'package:ml_linalg/linalg.dart';
  import 'package:xrange/zrange.dart';

  final matrix = Matrix.from([
    [11.0, 12.0, 13.0, 14.0],
    [15.0, 16.0, 17.0, 18.0],
    [21.0, 22.0, 23.0, 24.0],
    [24.0, 32.0, 53.0, 74.0],
  ]);
  final submatrix = matrix.submatrix(rows: ZRange.closedOpen(0, 2));
  print(submatrix);
  // [
  //  [11.0, 12.0, 13.0, 14.0],
  //  [15.0, 16.0, 17.0, 18.0],
  //];
Getting max value of the matrix
  import 'package:ml_linalg/linalg.dart';

  final matrix = Matrix.from([
    [11.0, 12.0, 13.0, 14.0],
    [15.0, 16.0, 17.0, 18.0],
    [21.0, 22.0, 23.0, 24.0],
    [24.0, 32.0, 53.0, 74.0],
  ]);
  final maxValue = matrix.max();
  print(maxValue);
  // 74.0
Getting min value of the matrix
  import 'package:ml_linalg/linalg.dart';

  final matrix = Matrix.from([
    [11.0, 12.0, 13.0, 14.0],
    [15.0, 16.0, 0.0, 18.0],
    [21.0, 22.0, -23.0, 24.0],
    [24.0, 32.0, 53.0, 74.0],
  ]);
  final minValue = matrix.min();
  print(minValue);
  // -23.0

Matrix fast map

Performs mapping from one matrix to another in an efficient way (using SIMD computations)

  import 'package:ml_linalg/linalg.dart';

  final matrix = Matrix.from([
    [11.0, 12.0, 13.0, 14.0],
    [15.0, 16.0, 0.0, 18.0],
    [21.0, 22.0, -23.0, 24.0],
    [24.0, 32.0, 53.0, 74.0],
  ], dtype: Float32x4);
  final newMatrix = matrix.fastMap<Float32x4>((Float32x4 val) => val.scale(3.0));
  print(newMatrix);
  // [
  //   [33.0, 36.0, 39.0, 42.0],
  //   [45.0, 48.0, 0.0, 54.0],
  //   [63.0, 66.0, -69.0, 72.0],
  //   [72.0, 96.0, 159.0, 222.0],
  // ]

Matrix indexing

The library's matrix interface offers a pick method, which returns a new matrix consisting of different segments of a source matrix (much like the loc method of a Pandas dataframe in Python). It is possible to build a new matrix from certain rows and columns, and they do not have to be contiguous: for example, you may need to create a matrix from rows 1, 3, 5 and columns 1 and 3. To do so, access the matrix this way:

import 'package:ml_linalg/linalg.dart';
import 'package:xrange/zrange.dart';

final matrix = Matrix.from([
//| 1 |         | 3 |                
  [4.0,   8.0,   12.0,   16.0,  34.0], // 1 Range(0, 1)
  [20.0,  24.0,  28.0,   32.0,  23.0],
  [36.0,  .0,   -8.0,   -12.0,  12.0], // 3 Range(2, 3)
  [16.0,  1.0,  -18.0,   3.0,   11.0],
  [112.0, 10.0,  34.0,   2.0,   10.0], // 5 Range(4, 5)
]);
final result = matrix.pick(
  rowRanges: [ZRange.closedOpen(0, 1), ZRange.closedOpen(2, 3), ZRange.closedOpen(4, 5)],
  columnRanges: [ZRange.closedOpen(0, 1), ZRange.closedOpen(2, 3)],
);
print(result);
/*
  [4.0,   12.0],
  [36.0,  -8.0],
  [112.0, 34.0]
*/

Matrix column updating

import 'package:ml_linalg/linalg.dart';

final matrix = Matrix.from([
  [11.0, 12.0, 13.0, 14.0],
  [15.0, 16.0, 0.0, 18.0],
  [21.0, 22.0, -23.0, 24.0],
  [24.0, 32.0, 53.0, 74.0],
], dtype: Float32x4);

matrix.setColumn(0, [1.0, 2.0, 3.0, 4.0]);

print(matrix);
// [
//  [1.0, 12.0, 13.0, 14.0],
//  [2.0, 16.0, 0.0, 18.0],
//  [3.0, 22.0, -23.0, 24.0],
//  [4.0, 32.0, 53.0, 74.0],
// ]

Contacts

If you have questions, feel free to write me on
